author		Jani Nikula <jani.nikula@intel.com>	2022-08-29 14:44:38 +0300
committer	Jani Nikula <jani.nikula@intel.com>	2022-08-29 15:14:59 +0300
commit		917bda9ab155032a02be1a57ebd4d949ae9e1528 (patch)
tree		7758d62783f5fb81777c37d0ad4e26eea6312c0f /drivers/gpu/drm/i915/gem
parent		95086cb969b2cb8abe4984457f219ec70d24052e (diff)
parent		2c2d7a67defa198a8b8148dbaddc9e5554efebc8 (diff)
Merge drm/drm-next into drm-intel-next
Sync drm-intel-next with v6.0-rc as well as recent drm-intel-gt-next.
Since drm-next does not yet have commit f0c70d41e4e8 ("drm/i915/guc: remove
runtime info printing from time stamp logging"), which so far exists only in
drm-intel-gt-next, that removal needs to be applied as part of the merge here
for the result to build.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c		 1
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_object.c		16
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_object_types.h	 3
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_pages.c		25
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_shmem.c		 6
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_shrinker.c		 3
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_userptr.c		 4
7 files changed, 29 insertions, 29 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7b2c14fd9e1..cd75b0ca2555 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -6,7 +6,6 @@
 
 #include <linux/dma-resv.h>
 #include <linux/highmem.h>
-#include <linux/intel-iommu.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..389e9f157ca5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
  */
 void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
 {
-	assert_object_held(obj);
+	assert_object_held_shared(obj);
 
 	if (!list_empty(&obj->vma.list)) {
 		struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			continue;
 		}
 
-		if (!i915_gem_object_trylock(obj, NULL)) {
-			/* busy, toss it back to the pile */
-			if (llist_add(&obj->freed, &i915->mm.free_list))
-				queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
-			continue;
-		}
-
 		__i915_gem_object_pages_fini(obj);
-		i915_gem_object_unlock(obj);
 		__i915_gem_free_object(obj);
 
 		/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
-		container_of(work, struct drm_i915_private, mm.free_work.work);
+		container_of(work, struct drm_i915_private, mm.free_work);
 
 	i915_gem_flush_free_objects(i915);
 }
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 */
 
 	if (llist_add(&obj->freed, &i915->mm.free_list))
-		queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+		queue_work(i915->wq, &i915->mm.free_work);
 }
 
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
 
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
-	INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
 void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
 #define I915_BO_READONLY	BIT(7)
 #define I915_TILING_QUIRK_BIT	8 /* unknown swizzling; do not release! */
 #define I915_BO_PROTECTED	BIT(9)
-#define I915_BO_WAS_BOUND_BIT	10
 
 	/**
 	 * @mem_flags - Mutable placement-related flags
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
 		 * pages were last acquired.
 		 */
 		bool dirty:1;
+
+		u32 tlb;
 	} mm;
 
 	struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8df8ae0e1dea..4df50b049cea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
 
 #include <drm/drm_cache.h>
 
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 
-#include "gt/intel_gt.h"
-
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
 				 unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
 		vunmap(ptr);
 }
 
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct intel_gt *gt = to_gt(i915);
+
+	if (!obj->mm.tlb)
+		return;
+
+	intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+	obj->mm.tlb = 0;
+}
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_reset_page_iter(obj);
 	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
 
-	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		intel_wakeref_t wakeref;
-
-		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
-			intel_gt_invalidate_tlbs(to_gt(i915));
-	}
+	flush_tlb_invalidate(obj);
 
 	return pages;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..f42ca1179f37 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -75,7 +75,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 	if (size > resource_size(&mr->region))
 		return -ENOMEM;
 
-	if (sg_alloc_table(st, page_count, GFP_KERNEL))
+	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
 		return -ENOMEM;
 
 	/*
@@ -137,7 +137,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 			 * trigger the out-of-memory killer and for
 			 * this we want __GFP_RETRY_MAYFAIL.
 			 */
-			gfp |= __GFP_RETRY_MAYFAIL;
+			gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
 		}
 	} while (1);
 
@@ -209,7 +209,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
 rebuild_st:
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
 	if (!st)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1030053571a2..8dc5c8874d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -426,7 +426,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
 	i915->mm.shrinker.batch = 4096;
-	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
+	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
+						  "drm-i915_gem"));
 
 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..8423df021b71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
	 * However...!
	 *
	 * The mmu-notifier can be invalidated for a
-	 * migrate_page, that is alreadying holding the lock
-	 * on the page. Such a try_to_unmap() will result
+	 * migrate_folio, that is alreadying holding the lock
+	 * on the folio. Such a try_to_unmap() will result
	 * in us calling put_pages() and so recursively try
	 * to lock the page. We avoid that deadlock with
	 * a trylock_page() and in exchange we risk missing
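
The i915_gem_pages.c hunks above replace the single I915_BO_WAS_BOUND_BIT flag
with a per-object invalidation seqno (obj->mm.tlb) that gates the TLB flush.
As a rough illustration of that seqno-gating idea, here is a minimal
stand-alone C sketch; every name in it (struct object, tlb_seqno,
object_unbound, and this toy flush_tlb_invalidate) is an invented stand-in,
not the i915 code, which additionally handles locking, runtime PM and the
per-GT state inside intel_gt_invalidate_tlb():

/* Sketch only: models the seqno gating, not the real driver code. */
#include <stdint.h>
#include <stdio.h>

struct object {
	uint32_t tlb;	/* seqno captured at unbind; 0 = no flush pending */
};

/* Global invalidation seqno; kept even while idle. */
static uint32_t tlb_seqno = 2;

/* On unbind, record that a flush is owed, tagged with an odd seqno. */
static void object_unbound(struct object *obj)
{
	obj->tlb = tlb_seqno | 1;
}

/*
 * Before releasing pages: flush only if an unbind happened since the last
 * completed invalidation; any newer flush already covered older tags.
 */
static void flush_tlb_invalidate(struct object *obj)
{
	if (!obj->tlb)
		return;

	if ((int32_t)(tlb_seqno - obj->tlb) < 0) {
		tlb_seqno += 2;		/* stand-in for the HW invalidation */
		printf("TLB invalidate, seqno now %u\n", tlb_seqno);
	}
	obj->tlb = 0;
}

int main(void)
{
	struct object a = {0}, b = {0};

	object_unbound(&a);
	object_unbound(&b);
	flush_tlb_invalidate(&a);	/* performs one invalidation */
	flush_tlb_invalidate(&b);	/* skipped: covered by a's flush */
	flush_tlb_invalidate(&a);	/* no-op: nothing pending */
	return 0;
}

The point of the pattern shows in main(): once one object's flush bumps the
global seqno past another object's recorded tag, the second flush can be
skipped as already done, which is what lets the driver batch invalidations
instead of flushing unconditionally as the old WAS_BOUND bit did.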