Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_userptr.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_userptr.c	971
1 file changed, 329 insertions, 642 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c31a6744daee..77cc3af3d518 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,727 +1,428 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2012-2014 Intel Corporation
+ *
+ * Based on amdgpu_mn, which bears the following notice:
+ *
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
*/
#include <linux/mmu_context.h>
-#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-struct i915_mm_struct {
- struct mm_struct *mm;
- struct drm_i915_private *i915;
- struct i915_mmu_notifier *mn;
- struct hlist_node node;
- struct kref kref;
- struct work_struct work;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER)
-#include <linux/interval_tree.h>
-
-struct i915_mmu_notifier {
- spinlock_t lock;
- struct hlist_node node;
- struct mmu_notifier mn;
- struct rb_root_cached objects;
- struct i915_mm_struct *mm;
-};
-
-struct i915_mmu_object {
- struct i915_mmu_notifier *mn;
- struct drm_i915_gem_object *obj;
- struct interval_tree_node it;
-};
-
-static void add_object(struct i915_mmu_object *mo)
-{
- GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
- interval_tree_insert(&mo->it, &mo->mn->objects);
-}
-
-static void del_object(struct i915_mmu_object *mo)
-{
- if (RB_EMPTY_NODE(&mo->it.rb))
- return;
-
- interval_tree_remove(&mo->it, &mo->mn->objects);
- RB_CLEAR_NODE(&mo->it.rb);
-}
+#ifdef CONFIG_MMU_NOTIFIER
-static void
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+/**
+ * i915_gem_userptr_invalidate - callback to notify about mm change
+ *
+ * @mni: the interval notifier for the range (mm) about to be updated
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
+ *
+ * Advance the notifier sequence so that pages pinned for this range are
+ * treated as stale and re-acquired on the next submission.
+ */
+static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct i915_mmu_object *mo = obj->userptr.mmu_object;
-
- /*
- * During mm_invalidate_range we need to cancel any userptr that
- * overlaps the range being invalidated. Doing so requires the
- * struct_mutex, and that risks recursion. In order to cause
- * recursion, the user must alias the userptr address space with
- * a GTT mmapping (possible with a MAP_FIXED) - then when we have
- * to invalidate that mmaping, mm_invalidate_range is called with
- * the userptr address *and* the struct_mutex held. To prevent that
- * we set a flag under the i915_mmu_notifier spinlock to indicate
- * whether this object is valid.
- */
- if (!mo)
- return;
-
- spin_lock(&mo->mn->lock);
- if (value)
- add_object(mo);
- else
- del_object(mo);
- spin_unlock(&mo->mn->lock);
+ mmu_interval_set_seq(mni, cur_seq);
+ return true;
}
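
The new callback does not unbind anything itself; it only advances the interval
notifier's sequence number. Consumers notice the bump through the begin/retry
pair from <linux/mmu_notifier.h>, as the submit_init/submit_done paths further
down do. A minimal sketch of that contract, with the surrounding pin/submit
steps elided and hypothetical:

	unsigned long seq;

	seq = mmu_interval_read_begin(&obj->userptr.notifier);
	/* ... pin the user pages and build the GPU request ... */
	if (mmu_interval_read_retry(&obj->userptr.notifier, seq)) {
		/*
		 * i915_gem_userptr_invalidate() ran in between; the
		 * pinned pages may be stale, so drop them and restart.
		 */
	}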
-static int
-userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- const struct mmu_notifier_range *range)
-{
- struct i915_mmu_notifier *mn =
- container_of(_mn, struct i915_mmu_notifier, mn);
- struct interval_tree_node *it;
- unsigned long end;
- int ret = 0;
-
- if (RB_EMPTY_ROOT(&mn->objects.rb_root))
- return 0;
-
- /* interval ranges are inclusive, but invalidate range is exclusive */
- end = range->end - 1;
-
- spin_lock(&mn->lock);
- it = interval_tree_iter_first(&mn->objects, range->start, end);
- while (it) {
- struct drm_i915_gem_object *obj;
-
- if (!mmu_notifier_range_blockable(range)) {
- ret = -EAGAIN;
- break;
- }
-
- /*
- * The mmu_object is released late when destroying the
- * GEM object so it is entirely possible to gain a
- * reference on an object in the process of being freed
- * since our serialisation is via the spinlock and not
- * the struct_mutex - and consequently use it after it
- * is freed and then double free it. To prevent that
- * use-after-free we only acquire a reference on the
- * object if it is not in the process of being destroyed.
- */
- obj = container_of(it, struct i915_mmu_object, it)->obj;
- if (!kref_get_unless_zero(&obj->base.refcount)) {
- it = interval_tree_iter_next(it, range->start, end);
- continue;
- }
- spin_unlock(&mn->lock);
-
- ret = i915_gem_object_unbind(obj,
- I915_GEM_OBJECT_UNBIND_ACTIVE |
- I915_GEM_OBJECT_UNBIND_BARRIER);
- if (ret == 0)
- ret = __i915_gem_object_put_pages(obj);
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- spin_lock(&mn->lock);
-
- /*
- * As we do not (yet) protect the mmu from concurrent insertion
- * over this range, there is no guarantee that this search will
- * terminate given a pathologic workload.
- */
- it = interval_tree_iter_first(&mn->objects, range->start, end);
- }
- spin_unlock(&mn->lock);
-
- return ret;
-
-}
-
-static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
- .invalidate_range_start = userptr_mn_invalidate_range_start,
+static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
+ .invalidate = i915_gem_userptr_invalidate,
};
-static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct i915_mm_struct *mm)
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_notifier *mn;
-
- mn = kmalloc(sizeof(*mn), GFP_KERNEL);
- if (mn == NULL)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&mn->lock);
- mn->mn.ops = &i915_gem_userptr_notifier;
- mn->objects = RB_ROOT_CACHED;
- mn->mm = mm;
-
- return mn;
+ return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
+ obj->userptr.ptr, obj->base.size,
+ &i915_gem_userptr_notifier_ops);
}
-static void
-i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_object *mo;
+ struct page **pvec = NULL;
- mo = fetch_and_zero(&obj->userptr.mmu_object);
- if (!mo)
- return;
-
- spin_lock(&mo->mn->lock);
- del_object(mo);
- spin_unlock(&mo->mn->lock);
- kfree(mo);
-}
+ assert_object_held_shared(obj);
-static struct i915_mmu_notifier *
-i915_mmu_notifier_find(struct i915_mm_struct *mm)
-{
- struct i915_mmu_notifier *mn;
- int err = 0;
-
- mn = mm->mn;
- if (mn)
- return mn;
-
- mn = i915_mmu_notifier_create(mm);
- if (IS_ERR(mn))
- err = PTR_ERR(mn);
-
- mmap_write_lock(mm->mm);
- mutex_lock(&mm->i915->mm_lock);
- if (mm->mn == NULL && !err) {
- /* Protected by mmap_lock (write-lock) */
- err = __mmu_notifier_register(&mn->mn, mm->mm);
- if (!err) {
- /* Protected by mm_lock */
- mm->mn = fetch_and_zero(&mn);
- }
- } else if (mm->mn) {
- /*
- * Someone else raced and successfully installed the mmu
- * notifier, we can cancel our own errors.
- */
- err = 0;
+ if (!--obj->userptr.page_ref) {
+ pvec = obj->userptr.pvec;
+ obj->userptr.pvec = NULL;
}
- mutex_unlock(&mm->i915->mm_lock);
- mmap_write_unlock(mm->mm);
+ GEM_BUG_ON(obj->userptr.page_ref < 0);
- if (mn && !IS_ERR(mn))
- kfree(mn);
+ if (pvec) {
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
- return err ? ERR_PTR(err) : mm->mn;
+ unpin_user_pages(pvec, num_pages);
+ kvfree(pvec);
+ }
}
-static int
-i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
- unsigned flags)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_notifier *mn;
- struct i915_mmu_object *mo;
-
- if (flags & I915_USERPTR_UNSYNCHRONIZED)
- return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
-
- if (WARN_ON(obj->userptr.mm == NULL))
- return -EINVAL;
+ unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
+ struct sg_table *st;
+ struct page **pvec;
+ unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
+ int ret;
- mn = i915_mmu_notifier_find(obj->userptr.mm);
- if (IS_ERR(mn))
- return PTR_ERR(mn);
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
+ return -E2BIG;
- mo = kzalloc(sizeof(*mo), GFP_KERNEL);
- if (!mo)
+ num_pages = obj->base.size >> PAGE_SHIFT;
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
return -ENOMEM;
- mo->mn = mn;
- mo->obj = obj;
- mo->it.start = obj->userptr.ptr;
- mo->it.last = obj->userptr.ptr + obj->base.size - 1;
- RB_CLEAR_NODE(&mo->it.rb);
-
- obj->userptr.mmu_object = mo;
- return 0;
-}
+ if (!obj->userptr.page_ref) {
+ ret = -EAGAIN;
+ goto err_free;
+ }
-static void
-i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
- struct mm_struct *mm)
-{
- if (mn == NULL)
- return;
+ obj->userptr.page_ref++;
+ pvec = obj->userptr.pvec;
- mmu_notifier_unregister(&mn->mn, mm);
- kfree(mn);
-}
+alloc_table:
+ ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+ num_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (ret)
+ goto err;
-#else
+ ret = i915_gem_gtt_prepare_pages(obj, st);
+ if (ret) {
+ sg_free_table(st);
-static void
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
-{
-}
+ if (max_segment > PAGE_SIZE) {
+ max_segment = PAGE_SIZE;
+ goto alloc_table;
+ }
-static void
-i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
-{
-}
+ goto err;
+ }
-static int
-i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
- unsigned flags)
-{
- if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
- return -ENODEV;
+ WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
+ if (i915_gem_object_can_bypass_llc(obj))
+ obj->cache_dirty = true;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ __i915_gem_object_set_pages(obj, st);
return 0;
-}
-static void
-i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
- struct mm_struct *mm)
-{
+err:
+ i915_gem_object_userptr_drop_ref(obj);
+err_free:
+ kfree(st);
+ return ret;
}
-#endif
-
-static struct i915_mm_struct *
-__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- struct i915_mm_struct *mm;
+ struct sgt_iter sgt_iter;
+ struct page *page;
- /* Protected by dev_priv->mm_lock */
- hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
- if (mm->mm == real)
- return mm;
+ if (!pages)
+ return;
- return NULL;
-}
+ __i915_gem_object_release_shmem(obj, pages, true);
+ i915_gem_gtt_finish_pages(obj, pages);
-static int
-i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_mm_struct *mm;
- int ret = 0;
-
- /* During release of the GEM object we hold the struct_mutex. This
- * precludes us from calling mmput() at that time as that may be
- * the last reference and so call exit_mmap(). exit_mmap() will
- * attempt to reap the vma, and if we were holding a GTT mmap
- * would then call drm_gem_vm_close() and attempt to reacquire
- * the struct mutex. So in order to avoid that recursion, we have
- * to defer releasing the mm reference until after we drop the
- * struct_mutex, i.e. we need to schedule a worker to do the clean
- * up.
+ /*
+ * We always mark objects as dirty when they are used by the GPU,
+ * just in case. However, if we set the vma as being read-only we know
+ * that the object will never have been written to.
*/
- mutex_lock(&dev_priv->mm_lock);
- mm = __i915_mm_struct_find(dev_priv, current->mm);
- if (mm == NULL) {
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (mm == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- kref_init(&mm->kref);
- mm->i915 = to_i915(obj->base.dev);
+ if (i915_gem_object_is_readonly(obj))
+ obj->mm.dirty = false;
- mm->mm = current->mm;
- mmgrab(current->mm);
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty && trylock_page(page)) {
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+ * but exist on a real mapping, we have to lock
+ * the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ *
+ * However...!
+ *
+ * The mmu-notifier can be invalidated for a
+ * migrate_folio, that is already holding the lock
+ * on the folio. Such a try_to_unmap() will result
+ * in us calling put_pages() and so recursively try
+ * to lock the page. We avoid that deadlock with
+ * a trylock_page() and in exchange we risk missing
+ * some page dirtying.
+ */
+ set_page_dirty(page);
+ unlock_page(page);
+ }
- mm->mn = NULL;
+ mark_page_accessed(page);
+ }
+ obj->mm.dirty = false;
- /* Protected by dev_priv->mm_lock */
- hash_add(dev_priv->mm_structs,
- &mm->node, (unsigned long)mm->mm);
- } else
- kref_get(&mm->kref);
+ sg_free_table(pages);
+ kfree(pages);
- obj->userptr.mm = mm;
-out:
- mutex_unlock(&dev_priv->mm_lock);
- return ret;
+ i915_gem_object_userptr_drop_ref(obj);
}
-static void
-__i915_mm_struct_free__worker(struct work_struct *work)
+static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
- struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
- i915_mmu_notifier_free(mm->mn, mm->mm);
- mmdrop(mm->mm);
- kfree(mm);
-}
+ struct sg_table *pages;
+ int err;
-static void
-__i915_mm_struct_free(struct kref *kref)
-{
- struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err)
+ return err;
- /* Protected by dev_priv->mm_lock */
- hash_del(&mm->node);
- mutex_unlock(&mm->i915->mm_lock);
+ if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ return -EBUSY;
- INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
- queue_work(mm->i915->mm.userptr_wq, &mm->work);
-}
+ assert_object_held(obj);
-static void
-i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
-{
- if (obj->userptr.mm == NULL)
- return;
+ pages = __i915_gem_object_unset_pages(obj);
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_userptr_put_pages(obj, pages);
- kref_put_mutex(&obj->userptr.mm->kref,
- __i915_mm_struct_free,
- &to_i915(obj->base.dev)->mm_lock);
- obj->userptr.mm = NULL;
+ return err;
}
-struct get_pages_work {
- struct work_struct work;
- struct drm_i915_gem_object *obj;
- struct task_struct *task;
-};
-
-static struct sg_table *
-__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, unsigned long num_pages)
+int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
- unsigned int max_segment = i915_sg_segment_size();
- struct sg_table *st;
- unsigned int sg_page_sizes;
- int ret;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ unsigned int gup_flags = 0;
+ unsigned long notifier_seq;
+ int pinned, ret;
-alloc_table:
- ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
- 0, num_pages << PAGE_SHIFT,
- max_segment,
- GFP_KERNEL);
- if (ret) {
- kfree(st);
- return ERR_PTR(ret);
- }
+ if (obj->userptr.notifier.mm != current->mm)
+ return -EFAULT;
- ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret) {
- sg_free_table(st);
+ notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
- if (max_segment > PAGE_SIZE) {
- max_segment = PAGE_SIZE;
- goto alloc_table;
- }
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (ret)
+ return ret;
- kfree(st);
- return ERR_PTR(ret);
+ if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
+ i915_gem_object_unlock(obj);
+ return 0;
}
- sg_page_sizes = i915_sg_page_sizes(st->sgl);
-
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+ ret = i915_gem_object_userptr_unbind(obj);
+ i915_gem_object_unlock(obj);
+ if (ret)
+ return ret;
- return st;
-}
+ pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pvec)
+ return -ENOMEM;
-static void
-__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
-{
- struct get_pages_work *work = container_of(_work, typeof(*work), work);
- struct drm_i915_gem_object *obj = work->obj;
- const unsigned long npages = obj->base.size >> PAGE_SHIFT;
- unsigned long pinned;
- struct page **pvec;
- int ret;
+ if (!i915_gem_object_is_readonly(obj))
+ gup_flags |= FOLL_WRITE;
- ret = -ENOMEM;
pinned = 0;
+ while (pinned < num_pages) {
+ ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
+ num_pages - pinned, gup_flags,
+ &pvec[pinned]);
+ if (ret < 0)
+ goto out;
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (pvec != NULL) {
- struct mm_struct *mm = obj->userptr.mm->mm;
- unsigned int flags = 0;
- int locked = 0;
-
- if (!i915_gem_object_is_readonly(obj))
- flags |= FOLL_WRITE;
-
- ret = -EFAULT;
- if (mmget_not_zero(mm)) {
- while (pinned < npages) {
- if (!locked) {
- mmap_read_lock(mm);
- locked = 1;
- }
- ret = pin_user_pages_remote
- (work->task, mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- flags,
- pvec + pinned, NULL, &locked);
- if (ret < 0)
- break;
-
- pinned += ret;
- }
- if (locked)
- mmap_read_unlock(mm);
- mmput(mm);
- }
+ pinned += ret;
}
- mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (obj->userptr.work == &work->work) {
- struct sg_table *pages = ERR_PTR(ret);
-
- if (pinned == npages) {
- pages = __i915_gem_userptr_alloc_pages(obj, pvec,
- npages);
- if (!IS_ERR(pages)) {
- pinned = 0;
- pages = NULL;
- }
- }
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (ret)
+ goto out;
- obj->userptr.work = ERR_CAST(pages);
- if (IS_ERR(pages))
- __i915_gem_userptr_set_active(obj, false);
+ if (mmu_interval_read_retry(&obj->userptr.notifier,
+ !obj->userptr.page_ref ? notifier_seq :
+ obj->userptr.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
}
- mutex_unlock(&obj->mm.lock);
- unpin_user_pages(pvec, pinned);
- kvfree(pvec);
+ if (!obj->userptr.page_ref++) {
+ obj->userptr.pvec = pvec;
+ obj->userptr.notifier_seq = notifier_seq;
+ pvec = NULL;
+ ret = ____i915_gem_object_get_pages(obj);
+ }
- i915_gem_object_put(obj);
- put_task_struct(work->task);
- kfree(work);
-}
+ obj->userptr.page_ref--;
-static struct sg_table *
-__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
-{
- struct get_pages_work *work;
-
- /* Spawn a worker so that we can acquire the
- * user pages without holding our mutex. Access
- * to the user pages requires mmap_lock, and we have
- * a strict lock ordering of mmap_lock, struct_mutex -
- * we already hold struct_mutex here and so cannot
- * call gup without encountering a lock inversion.
- *
- * Userspace will keep on repeating the operation
- * (thanks to EAGAIN) until either we hit the fast
- * path or the worker completes. If the worker is
- * cancelled or superseded, the task is still run
- * but the results ignored. (This leads to
- * complications that we may have a stray object
- * refcount that we need to be wary of when
- * checking for existing objects during creation.)
- * If the worker encounters an error, it reports
- * that error back to this function through
- * obj->userptr.work = ERR_PTR.
- */
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL)
- return ERR_PTR(-ENOMEM);
+out_unlock:
+ i915_gem_object_unlock(obj);
- obj->userptr.work = &work->work;
+out:
+ if (pvec) {
+ unpin_user_pages(pvec, pinned);
+ kvfree(pvec);
+ }
- work->obj = i915_gem_object_get(obj);
+ return ret;
+}
- work->task = current;
- get_task_struct(work->task);
+int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
+{
+ if (mmu_interval_read_retry(&obj->userptr.notifier,
+ obj->userptr.notifier_seq)) {
+ /* We collided with the mmu notifier, need to retry */
- INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
- queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
+ return -EAGAIN;
+ }
- return ERR_PTR(-EAGAIN);
+ return 0;
}
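
i915_gem_object_userptr_submit_init() pins the pages under a freshly sampled
sequence number, and i915_gem_object_userptr_submit_done() re-checks that
number once the request is queued; -EAGAIN from either side means an
invalidation raced with the submission. A hedged sketch of the retry loop a
caller such as execbuf is expected to run (request construction elided):

	int err;

	do {
		err = i915_gem_object_userptr_submit_init(obj);
		if (err)
			continue;	/* retried below only on -EAGAIN */

		/* ... lock the object, pin it, queue the request ... */

		err = i915_gem_object_userptr_submit_done(obj);
	} while (err == -EAGAIN);	/* invalidated under us: re-pin, retry */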
-static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
- const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
- struct mm_struct *mm = obj->userptr.mm->mm;
- struct page **pvec;
- struct sg_table *pages;
- bool active;
- int pinned;
- unsigned int gup_flags = 0;
-
- /* If userspace should engineer that these pages are replaced in
- * the vma between us binding this page into the GTT and completion
- * of rendering... Their loss. If they change the mapping of their
- * pages they need to create a new bo to point to the new vma.
- *
- * However, that still leaves open the possibility of the vma
- * being copied upon fork. Which falls under the same userspace
- * synchronisation issue as a regular bo, except that this time
- * the process may not be expecting that a particular piece of
- * memory is tied to the GPU.
- *
- * Fortunately, we can hook into the mmu_notifier in order to
- * discard the page references prior to anything nasty happening
- * to the vma (discard or cloning) which should prevent the more
- * egregious cases from causing harm.
- */
-
- if (obj->userptr.work) {
- /* active flag should still be held for the pending work */
- if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
- else
- return -EAGAIN;
- }
+ int err;
- pvec = NULL;
- pinned = 0;
+ err = i915_gem_object_userptr_submit_init(obj);
+ if (err)
+ return err;
- if (mm == current->mm) {
- pvec = kvmalloc_array(num_pages, sizeof(struct page *),
- GFP_KERNEL |
- __GFP_NORETRY |
- __GFP_NOWARN);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
+ if (!err) {
/*
- * Using __get_user_pages_fast() with a read-only
- * access is questionable. A read-only page may be
- * COW-broken, and then this might end up giving
- * the wrong side of the COW..
- *
- * We may or may not care.
+ * Since we only check validity, not use the pages,
+ * it doesn't matter if we collide with the mmu notifier,
+ * and -EAGAIN handling is not required.
*/
- if (pvec) {
- /* defer to worker if malloc fails */
- if (!i915_gem_object_is_readonly(obj))
- gup_flags |= FOLL_WRITE;
- pinned = pin_user_pages_fast_only(obj->userptr.ptr,
- num_pages, gup_flags,
- pvec);
- }
- }
+ err = i915_gem_object_pin_pages(obj);
+ if (!err)
+ i915_gem_object_unpin_pages(obj);
- active = false;
- if (pinned < 0) {
- pages = ERR_PTR(pinned);
- pinned = 0;
- } else if (pinned < num_pages) {
- pages = __i915_gem_userptr_get_pages_schedule(obj);
- active = pages == ERR_PTR(-EAGAIN);
- } else {
- pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
- active = !IS_ERR(pages);
+ i915_gem_object_unlock(obj);
}
- if (active)
- __i915_gem_userptr_set_active(obj, true);
-
- if (IS_ERR(pages))
- unpin_user_pages(pvec, pinned);
- kvfree(pvec);
- return PTR_ERR_OR_ZERO(pages);
+ return err;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
- struct sgt_iter sgt_iter;
- struct page *page;
+ GEM_WARN_ON(obj->userptr.page_ref);
- /* Cancel any inflight work and force them to restart their gup */
- obj->userptr.work = NULL;
- __i915_gem_userptr_set_active(obj, false);
- if (!pages)
+ if (!obj->userptr.notifier.mm)
return;
- __i915_gem_object_release_shmem(obj, pages, true);
- i915_gem_gtt_finish_pages(obj, pages);
-
- /*
- * We always mark objects as dirty when they are used by the GPU,
- * just in case. However, if we set the vma as being read-only we know
- * that the object will never have been written to.
- */
- if (i915_gem_object_is_readonly(obj))
- obj->mm.dirty = false;
-
- for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty && trylock_page(page)) {
- /*
- * As this may not be anonymous memory (e.g. shmem)
- * but exist on a real mapping, we have to lock
- * the page in order to dirty it -- holding
- * the page reference is not sufficient to
- * prevent the inode from being truncated.
- * Play safe and take the lock.
- *
- * However...!
- *
- * The mmu-notifier can be invalidated for a
- * migrate_page, that is alreadying holding the lock
- * on the page. Such a try_to_unmap() will result
- * in us calling put_pages() and so recursively try
- * to lock the page. We avoid that deadlock with
- * a trylock_page() and in exchange we risk missing
- * some page dirtying.
- */
- set_page_dirty(page);
- unlock_page(page);
- }
+ mmu_interval_notifier_remove(&obj->userptr.notifier);
+ obj->userptr.notifier.mm = NULL;
+}
- mark_page_accessed(page);
- unpin_user_page(page);
- }
- obj->mm.dirty = false;
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+ drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");
- sg_free_table(pages);
- kfree(pages);
+ return -EINVAL;
}
-static void
-i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+static int
+i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
- i915_gem_userptr_release__mmu_notifier(obj);
- i915_gem_userptr_release__mm_struct(obj);
+ drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");
+
+ return -EINVAL;
}
static int
-i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
- if (obj->userptr.mmu_object)
- return 0;
+ drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");
- return i915_gem_userptr_init__mmu_notifier(obj, 0);
+ return -EINVAL;
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE |
+ .name = "i915_gem_object_userptr",
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_MMAP |
- I915_GEM_OBJECT_ASYNC_CANCEL,
+ I915_GEM_OBJECT_IS_PROXY,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .pwrite = i915_gem_userptr_pwrite,
+ .pread = i915_gem_userptr_pread,
.release = i915_gem_userptr_release,
};
+#endif
+
+static int
+probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
+{
+ VMA_ITERATOR(vmi, mm, addr);
+ struct vm_area_struct *vma;
+ unsigned long end = addr + len;
+
+ mmap_read_lock(mm);
+ for_each_vma_range(vmi, vma, end) {
+ /* Check for holes, note that we also update the addr below */
+ if (vma->vm_start > addr)
+ break;
+
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ break;
+
+ addr = vma->vm_end;
+ }
+ mmap_read_unlock(mm);
+
+ if (vma || addr < end)
+ return -EFAULT;
+ return 0;
+}
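
probe_range() walks every VMA overlapping [addr, addr + len) under
mmap_read_lock: a gap before the current VMA, a trailing gap (addr still short
of end after the walk), or any VM_PFNMAP/VM_MIXEDMAP mapping fails the probe
with -EFAULT. A hedged userspace illustration of a failing probe, using only
the uapi names from include/uapi/drm/i915_drm.h (the buffer layout itself is
hypothetical):

	/*
	 * Needs <stdint.h>, <sys/mman.h>, <sys/ioctl.h>, <drm/i915_drm.h>;
	 * fd is an open i915 DRM fd. 3-page anonymous mapping with a hole
	 * punched in the middle.
	 */
	char *ptr = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	munmap(ptr + 4096, 4096);

	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,
		.user_size = 3 * 4096,
		.flags = I915_USERPTR_PROBE,
	};
	/* fails with errno == EFAULT: the range contains a hole */
	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);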
+
/*
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
@@ -762,14 +463,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
- static struct lock_class_key lock_class;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ static struct lock_class_key __maybe_unused lock_class;
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
- u32 handle;
+ struct drm_i915_gem_object __maybe_unused *obj;
+ int __maybe_unused ret;
+ u32 __maybe_unused handle;
- if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
@@ -777,24 +478,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
}
if (args->flags & ~(I915_USERPTR_READ_ONLY |
- I915_USERPTR_UNSYNCHRONIZED))
+ I915_USERPTR_UNSYNCHRONIZED |
+ I915_USERPTR_PROBE))
return -EINVAL;
- /*
- * XXX: There is a prevalence of the assumption that we fit the
- * object's page count inside a 32bit _signed_ variable. Let's document
- * this and catch if we ever need to fix it. In the meantime, if you do
- * spot such a local variable, please consider fixing!
- *
- * Aside from our own locals (for which we have no excuse!):
- * - sg_table embeds unsigned int for num_pages
- * - get_user_pages*() mixed ints with longs
- */
-
- if (args->user_size >> PAGE_SHIFT > INT_MAX)
- return -E2BIG;
-
- if (overflows_type(args->user_size, obj->base.size))
+ if (i915_gem_object_size_2big(args->user_size))
return -E2BIG;
if (!args->user_size)
@@ -806,26 +494,43 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
+ if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
+ return -ENODEV;
+
if (args->flags & I915_USERPTR_READ_ONLY) {
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- if (!dev_priv->gt.vm->has_read_only)
+ if (!to_gt(i915)->vm->has_read_only)
return -ENODEV;
}
+ if (args->flags & I915_USERPTR_PROBE) {
+ /*
+ * Check that the range pointed to represents real struct
+ * pages and not iomappings (at this moment in time!)
+ */
+ ret = probe_range(current->mm, args->user_ptr, args->user_size);
+ if (ret)
+ return ret;
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
+ I915_BO_ALLOC_USER);
+ obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
+ obj->userptr.notifier_seq = ULONG_MAX;
if (args->flags & I915_USERPTR_READ_ONLY)
i915_gem_object_set_readonly(obj);
@@ -833,9 +538,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
- ret = i915_gem_userptr_init__mm_struct(obj);
- if (ret == 0)
- ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+ ret = i915_gem_userptr_init__mmu_notifier(obj);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -846,24 +549,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
args->handle = handle;
return 0;
+#else
+ return -ENODEV;
+#endif
}
-int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
-{
- mutex_init(&dev_priv->mm_lock);
- hash_init(dev_priv->mm_structs);
-
- dev_priv->mm.userptr_wq =
- alloc_workqueue("i915-userptr-acquire",
- WQ_HIGHPRI | WQ_UNBOUND,
- 0);
- if (!dev_priv->mm.userptr_wq)
- return -ENOMEM;
-
- return 0;
-}
-
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
-{
- destroy_workqueue(dev_priv->mm.userptr_wq);
-}
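
For completeness, the surviving creation path from userspace, sketched under
the assumption of an already-open DRM fd and a page-aligned buffer (error
handling trimmed); the struct and ioctl number are the real uapi, the helper
itself is illustrative:

	#include <stdint.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int create_userptr(int fd, void *ptr, uint64_t size,
				  uint32_t *handle)
	{
		struct drm_i915_gem_userptr arg = {
			.user_ptr = (uintptr_t)ptr,	/* page aligned */
			.user_size = size,		/* page aligned */
			.flags = 0,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
			return -errno;

		*handle = arg.handle;	/* a regular GEM handle from here on */
		return 0;
	}

Note that with this rework I915_USERPTR_UNSYNCHRONIZED is rejected with
-ENODEV, and pread, pwrite and dma-buf export on the resulting handle all
return -EINVAL.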