Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_userptr.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_userptr.c	197
1 file changed, 85 insertions, 112 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 7487bab11f0b..77cc3af3d518 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2012-2014 Intel Corporation
  *
- * Based on amdgpu_mn, which bears the following notice:
+ * Based on amdgpu_mn, which bears the following notice:
  *
  * Copyright 2014 Advanced Micro Devices, Inc.
  * All Rights Reserved.
@@ -39,6 +38,8 @@
 #include <linux/swap.h>
 #include <linux/sched/mm.h>

+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
@@ -60,36 +61,7 @@
 static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
 					const struct mmu_notifier_range *range,
 					unsigned long cur_seq)
 {
-	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	long r;
-
-	if (!mmu_notifier_range_blockable(range))
-		return false;
-
-	spin_lock(&i915->mm.notifier_lock);
-
 	mmu_interval_set_seq(mni, cur_seq);
-
-	spin_unlock(&i915->mm.notifier_lock);
-
-	/*
-	 * We don't wait when the process is exiting. This is valid
-	 * because the object will be cleaned up anyway.
-	 *
-	 * This is also temporarily required as a hack, because we
-	 * cannot currently force non-consistent batch buffers to preempt
-	 * and reschedule by waiting on it, hanging processes on exit.
-	 */
-	if (current->flags & PF_EXITING)
-		return true;
-
-	/* we will unbind on next submission, still have userptr pins */
-	r = dma_resv_wait_timeout(obj->base.resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
-	if (r <= 0)
-		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
-
 	return true;
 }
@@ -107,16 +79,15 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)

 static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct page **pvec = NULL;

-	spin_lock(&i915->mm.notifier_lock);
+	assert_object_held_shared(obj);
+
 	if (!--obj->userptr.page_ref) {
 		pvec = obj->userptr.pvec;
 		obj->userptr.pvec = NULL;
 	}
 	GEM_BUG_ON(obj->userptr.page_ref < 0);
-	spin_unlock(&i915->mm.notifier_lock);

 	if (pvec) {
 		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
@@ -128,38 +99,34 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)

 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
-	unsigned int max_segment = i915_sg_segment_size();
+	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
 	struct sg_table *st;
-	unsigned int sg_page_sizes;
-	struct scatterlist *sg;
 	struct page **pvec;
+	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
 	int ret;

+	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
+		return -E2BIG;
+
+	num_pages = obj->base.size >> PAGE_SHIFT;
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		return -ENOMEM;

-	spin_lock(&i915->mm.notifier_lock);
-	if (GEM_WARN_ON(!obj->userptr.page_ref)) {
-		spin_unlock(&i915->mm.notifier_lock);
-		ret = -EFAULT;
+	if (!obj->userptr.page_ref) {
+		ret = -EAGAIN;
 		goto err_free;
 	}

 	obj->userptr.page_ref++;
 	pvec = obj->userptr.pvec;
-	spin_unlock(&i915->mm.notifier_lock);

 alloc_table:
-	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-					 num_pages << PAGE_SHIFT, max_segment,
-					 NULL, 0, GFP_KERNEL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
+	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+						num_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret)
 		goto err;
-	}

 	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
@@ -173,9 +140,11 @@ alloc_table:
 		goto err;
 	}

-	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
+	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
+	if (i915_gem_object_can_bypass_llc(obj))
+		obj->cache_dirty = true;

-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);

 	return 0;

@@ -220,8 +189,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	 * However...!
 	 *
 	 * The mmu-notifier can be invalidated for a
-	 * migrate_page, that is alreadying holding the lock
-	 * on the page. Such a try_to_unmap() will result
+	 * migrate_folio, that is alreadying holding the lock
+	 * on the folio. Such a try_to_unmap() will result
 	 * in us calling put_pages() and so recursively try
 	 * to lock the page. We avoid that deadlock with
 	 * a trylock_page() and in exchange we risk missing
@@ -241,7 +210,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	i915_gem_object_userptr_drop_ref(obj);
 }

-static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
+static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 	int err;
@@ -259,15 +228,11 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
 	if (!IS_ERR_OR_NULL(pages))
 		i915_gem_userptr_put_pages(obj, pages);

-	if (get_pages)
-		err = ____i915_gem_object_get_pages(obj);
-
 	return err;
 }

 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	unsigned int gup_flags = 0;
@@ -277,39 +242,22 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 	if (obj->userptr.notifier.mm != current->mm)
 		return -EFAULT;

+	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
+
 	ret = i915_gem_object_lock_interruptible(obj, NULL);
 	if (ret)
 		return ret;

-	/* optimistically try to preserve current pages while unlocked */
-	if (i915_gem_object_has_pages(obj) &&
-	    !mmu_interval_check_retry(&obj->userptr.notifier,
-				      obj->userptr.notifier_seq)) {
-		spin_lock(&i915->mm.notifier_lock);
-		if (obj->userptr.pvec &&
-		    !mmu_interval_read_retry(&obj->userptr.notifier,
-					     obj->userptr.notifier_seq)) {
-			obj->userptr.page_ref++;
-
-			/* We can keep using the current binding, this is the fastpath */
-			ret = 1;
-		}
-		spin_unlock(&i915->mm.notifier_lock);
+	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
+		i915_gem_object_unlock(obj);
+		return 0;
 	}

-	if (!ret) {
-		/* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
-		ret = i915_gem_object_userptr_unbind(obj, false);
-	}
+	ret = i915_gem_object_userptr_unbind(obj);
 	i915_gem_object_unlock(obj);
-	if (ret < 0)
+	if (ret)
 		return ret;

-	if (ret > 0)
-		return 0;
-
-	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
-
 	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pvec)
 		return -ENOMEM;
@@ -317,7 +265,7 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 	if (!i915_gem_object_is_readonly(obj))
 		gup_flags |= FOLL_WRITE;

-	pinned = ret = 0;
+	pinned = 0;
 	while (pinned < num_pages) {
 		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
 					  num_pages - pinned, gup_flags,
@@ -327,9 +275,10 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)

 		pinned += ret;
 	}

-	ret = 0;
-	spin_lock(&i915->mm.notifier_lock);
+	ret = i915_gem_object_lock_interruptible(obj, NULL);
+	if (ret)
+		goto out;

 	if (mmu_interval_read_retry(&obj->userptr.notifier,
 				    !obj->userptr.page_ref ? notifier_seq :
@@ -341,12 +290,14 @@
 	if (!obj->userptr.page_ref++) {
 		obj->userptr.pvec = pvec;
 		obj->userptr.notifier_seq = notifier_seq;
-
 		pvec = NULL;
+		ret = ____i915_gem_object_get_pages(obj);
 	}

+	obj->userptr.page_ref--;
+
 out_unlock:
-	spin_unlock(&i915->mm.notifier_lock);
+	i915_gem_object_unlock(obj);

 out:
 	if (pvec) {
@@ -369,11 +320,6 @@ int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
 	return 0;
 }

-void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_userptr_drop_ref(obj);
-}
-
 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
 {
 	int err;
@@ -396,7 +342,6 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
 		i915_gem_object_unlock(obj);
 	}

-	i915_gem_object_userptr_submit_fini(obj);
 	return err;
 }

@@ -405,6 +350,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	GEM_WARN_ON(obj->userptr.page_ref);

+	if (!obj->userptr.notifier.mm)
+		return;
+
 	mmu_interval_notifier_remove(&obj->userptr.notifier);
 	obj->userptr.notifier.mm = NULL;
 }
@@ -450,6 +398,31 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {

 #endif

+static int
+probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
+{
+	VMA_ITERATOR(vmi, mm, addr);
+	struct vm_area_struct *vma;
+	unsigned long end = addr + len;
+
+	mmap_read_lock(mm);
+	for_each_vma_range(vmi, vma, end) {
+		/* Check for holes, note that we also update the addr below */
+		if (vma->vm_start > addr)
+			break;
+
+		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+			break;
+
+		addr = vma->vm_end;
+	}
+	mmap_read_unlock(mm);
+
+	if (vma || addr < end)
+		return -EFAULT;
+	return 0;
+}
+
 /*
  * Creates a new mm object that wraps some normal memory from the process
  * context - user memory.
@@ -491,13 +464,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		       struct drm_file *file)
 {
 	static struct lock_class_key __maybe_unused lock_class;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_userptr *args = data;
 	struct drm_i915_gem_object __maybe_unused *obj;
 	int __maybe_unused ret;
 	u32 __maybe_unused handle;

-	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
+	if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) {
 		/* We cannot support coherent userptr objects on hw without
 		 * LLC and broken snooping.
 		 */
@@ -505,7 +478,8 @@
 	}

 	if (args->flags & ~(I915_USERPTR_READ_ONLY |
-			    I915_USERPTR_UNSYNCHRONIZED))
+			    I915_USERPTR_UNSYNCHRONIZED |
+			    I915_USERPTR_PROBE))
 		return -EINVAL;

 	if (i915_gem_object_size_2big(args->user_size))
@@ -528,10 +502,20 @@
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		if (!dev_priv->gt.vm->has_read_only)
+		if (!to_gt(i915)->vm->has_read_only)
 			return -ENODEV;
 	}

+	if (args->flags & I915_USERPTR_PROBE) {
+		/*
+		 * Check that the range pointed to represents real struct
+		 * pages and not iomappings (at this moment in time!)
+		 */
+		ret = probe_range(current->mm, args->user_ptr, args->user_size);
+		if (ret)
+			return ret;
+	}
+
 #ifdef CONFIG_MMU_NOTIFIER
 	obj = i915_gem_object_alloc();
 	if (obj == NULL)
@@ -539,7 +523,8 @@

 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
 	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
-			     I915_BO_ALLOC_STRUCT_PAGE);
+			     I915_BO_ALLOC_USER);
+	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
@@ -569,15 +554,3 @@
 #endif
 }

-int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
-{
-#ifdef CONFIG_MMU_NOTIFIER
-	spin_lock_init(&dev_priv->mm.notifier_lock);
-#endif
-
-	return 0;
-}
-
-void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
-{
-}
