Diffstat (limited to 'drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c')
| -rw-r--r-- | drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 698 |
1 file changed, 575 insertions, 123 deletions
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5575172c66f5..0d250d57496a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -4,17 +4,29 @@
  * Copyright © 2016 Intel Corporation
  */
 
+#include <linux/highmem.h>
 #include <linux/prime_numbers.h>
 
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
-#include "gem/i915_gem_region.h"
+#include "gt/intel_migrate.h"
+#include "i915_reg.h"
+#include "i915_ttm_buddy_manager.h"
+
 #include "huge_gem_object.h"
 #include "i915_selftest.h"
 #include "selftests/i915_random.h"
 #include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
 #include "selftests/igt_mmap.h"
 
 struct tile {
@@ -84,13 +96,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 				 struct rnd_state *prng)
 {
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
-	struct i915_ggtt_view view;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_gtt_view view;
 	struct i915_vma *vma;
+	unsigned long offset;
 	unsigned long page;
 	u32 __iomem *io;
 	struct page *p;
 	unsigned int n;
-	u64 offset;
 	u32 *cpu;
 	int err;
 
@@ -141,13 +154,13 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	if (offset >= obj->base.size)
 		goto out;
 
-	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 
 	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	cpu = kmap(p) + offset_in_page(offset);
 	drm_clflush_virt_range(cpu, sizeof(*cpu));
 	if (*cpu != (u32)page) {
-		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
 		       page, n,
 		       view.partial.offset,
 		       view.partial.size,
@@ -165,7 +178,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	kunmap(p);
 
 out:
-	__i915_vma_put(vma);
+	i915_gem_object_lock(obj, NULL);
+	i915_vma_destroy(vma);
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
@@ -175,6 +190,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 {
 	const unsigned int nreal = obj->scratch / PAGE_SIZE;
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	unsigned long page;
 	int err;
@@ -198,12 +214,12 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 	}
 
 	for_each_prime_number_from(page, 1, npages) {
-		struct i915_ggtt_view view =
+		struct i915_gtt_view view =
 			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+		unsigned long offset;
 		u32 __iomem *io;
 		struct page *p;
 		unsigned int n;
-		u64 offset;
 		u32 *cpu;
 
 		GEM_BUG_ON(view.partial.size > nreal);
@@ -234,13 +250,13 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 		if (offset >= obj->base.size)
 			continue;
 
-		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 
 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 		cpu = kmap(p) + offset_in_page(offset);
 		drm_clflush_virt_range(cpu, sizeof(*cpu));
 		if (*cpu != (u32)page) {
-			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
 			       page, n,
 			       view.partial.offset,
 			       view.partial.size,
@@ -259,7 +275,9 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 		if (err)
 			return err;
 
-		__i915_vma_put(vma);
+		i915_gem_object_lock(obj, NULL);
+		i915_vma_destroy(vma);
+		i915_gem_object_unlock(obj);
 
 		if (igt_timeout(end_time,
 				"%s: timed out after tiling=%d stride=%d\n",
@@ -305,7 +323,7 @@ static int igt_partial_tiling(void *arg)
 	int tiling;
 	int err;
 
-	if (!i915_ggtt_has_aperture(&i915->ggtt))
+	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
 		return 0;
 
 	/* We want to check the page mapping and fencing of a large object
@@ -318,7 +336,7 @@ static int igt_partial_tiling(void *arg)
 
 	obj = huge_gem_object(i915,
 			      nreal << PAGE_SHIFT,
-			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -353,7 +371,7 @@ static int igt_partial_tiling(void *arg)
 		unsigned int pitch;
 		struct tile tile;
 
-		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
 			/*
 			 * The swizzling pattern is actually unknown as it
 			 * varies based on physical address of each page.
@@ -364,10 +382,10 @@ static int igt_partial_tiling(void *arg)
 		tile.tiling = tiling;
 		switch (tiling) {
 		case I915_TILING_X:
-			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
 			break;
 		case I915_TILING_Y:
-			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
 			break;
 		}
 
@@ -438,7 +456,7 @@ static int igt_smoke_tiling(void *arg)
 	IGT_TIMEOUT(end);
 	int err;
 
-	if (!i915_ggtt_has_aperture(&i915->ggtt))
+	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
 		return 0;
 
 	/*
@@ -450,12 +468,12 @@ static int igt_smoke_tiling(void *arg)
 	 * Remember to look at the st_seed if we see a flip-flop in BAT!
 	 */
 
-	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
 		return 0;
 
 	obj = huge_gem_object(i915,
 			      nreal << PAGE_SHIFT,
-			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -484,10 +502,10 @@ static int igt_smoke_tiling(void *arg)
 			break;
 
 		case I915_TILING_X:
-			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
 			break;
 		case I915_TILING_Y:
-			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
 			break;
 		}
 
@@ -550,10 +568,8 @@ retry:
 		goto err_unpin;
 	}
 
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq,
-					      EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq,
+				      EXEC_OBJECT_WRITE);
 	i915_request_add(rq);
 
err_unpin:
@@ -573,34 +589,57 @@ err:
 	return 0;
 }
 
+static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
+{
+	if (HAS_LMEM(i915))
+		return I915_MMAP_TYPE_FIXED;
+
+	return I915_MMAP_TYPE_GTT;
+}
+
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+		       unsigned long size)
+{
+	if (HAS_LMEM(i915)) {
+		struct intel_memory_region *sys_region =
+			i915->mm.regions[INTEL_REGION_SMEM];
+
+		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+	}
+
+	return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       unsigned long size,
 			       int expected)
 {
 	struct drm_i915_gem_object *obj;
-	struct i915_mmap_offset *mmo;
+	u64 offset;
+	int ret;
 
-	obj = i915_gem_object_create_internal(i915, size);
+	obj = create_sys_or_internal(i915, size);
 	if (IS_ERR(obj))
-		return false;
+		return expected && expected == PTR_ERR(obj);
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
 	i915_gem_object_put(obj);
 
-	return PTR_ERR_OR_ZERO(mmo) == expected;
+	return ret == expected;
 }
 
 static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_driver_unregister__shrinker(i915);
-	intel_gt_pm_get(&i915->gt);
-	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+	intel_gt_pm_get_untracked(to_gt(i915));
+	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
 }
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
 	igt_flush_test(i915);
-	intel_gt_pm_put(&i915->gt);
+	intel_gt_pm_put_untracked(to_gt(i915));
 	i915_gem_driver_register__shrinker(i915);
 }
 
@@ -622,13 +661,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *hole, *next;
-	struct i915_mmap_offset *mmo;
 	int loop, err = 0;
+	u64 offset;
+	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
 	/* Disable background reaper */
 	disable_retire_worker(i915);
-	GEM_BUG_ON(!i915->gt.awake);
-	intel_gt_retire_requests(&i915->gt);
+	GEM_BUG_ON(!to_gt(i915)->awake);
+	intel_gt_retire_requests(to_gt(i915));
 	i915_gem_drain_freed_objects(i915);
 
 	/* Trim the device mmap space to only a page */
@@ -674,27 +714,27 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	}
 
 	/* Too large */
-	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
 		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
 		err = -EINVAL;
 		goto out;
 	}
 
 	/* Fill the hole, further allocation attempts should then fail */
-	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	obj = create_sys_or_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
+		pr_err("Unable to create object for reclaimed hole\n");
 		goto out;
 	}
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
-	if (IS_ERR(mmo)) {
+	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
+	if (err) {
 		pr_err("Unable to insert object into reclaimed hole\n");
-		err = PTR_ERR(mmo);
 		goto err_obj;
 	}
 
-	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
 		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
 		err = -EINVAL;
 		goto err_obj;
@@ -704,7 +744,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
 	/* Now fill with busy dead objects that we expect to reap */
 	for (loop = 0; loop < 3; loop++) {
-		if (intel_gt_is_wedged(&i915->gt))
+		if (intel_gt_is_wedged(to_gt(i915)))
 			break;
 
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -740,6 +780,7 @@ err_obj:
 
 static int gtt_set(struct drm_i915_gem_object *obj)
 {
+	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	void __iomem *map;
 	int err = 0;
@@ -748,7 +789,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	intel_gt_pm_get(vma->vm->gt);
+	wakeref = intel_gt_pm_get(vma->vm->gt);
 	map = i915_vma_pin_iomap(vma);
 	i915_vma_unpin(vma);
 	if (IS_ERR(map)) {
@@ -760,12 +801,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
 	i915_vma_unpin_iomap(vma);
 
out:
-	intel_gt_pm_put(vma->vm->gt);
+	intel_gt_pm_put(vma->vm->gt, wakeref);
 	return err;
 }
 
 static int gtt_check(struct drm_i915_gem_object *obj)
 {
+	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	void __iomem *map;
 	int err = 0;
@@ -774,7 +816,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	intel_gt_pm_get(vma->vm->gt);
+	wakeref = intel_gt_pm_get(vma->vm->gt);
 	map = i915_vma_pin_iomap(vma);
 	i915_vma_unpin(vma);
 	if (IS_ERR(map)) {
@@ -790,7 +832,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
 	i915_vma_unpin_iomap(vma);
 
out:
-	intel_gt_pm_put(vma->vm->gt);
+	intel_gt_pm_put(vma->vm->gt, wakeref);
 	return err;
 }
 
@@ -830,34 +872,25 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-	if (type == I915_MMAP_TYPE_GTT &&
-	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
-		return false;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	bool no_map;
 
-	if (type != I915_MMAP_TYPE_GTT &&
-	    !i915_gem_object_has_struct_page(obj) &&
-	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+	if (obj->ops->mmap_offset)
+		return type == I915_MMAP_TYPE_FIXED;
+	else if (type == I915_MMAP_TYPE_FIXED)
 		return false;
 
-	return true;
-}
-
-static void object_set_placements(struct drm_i915_gem_object *obj,
-				  struct intel_memory_region **placements,
-				  unsigned int n_placements)
-{
-	GEM_BUG_ON(!n_placements);
+	if (type == I915_MMAP_TYPE_GTT &&
+	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
+		return false;
 
-	if (n_placements == 1) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		struct intel_memory_region *mr = placements[0];
+	i915_gem_object_lock(obj, NULL);
+	no_map = (type != I915_MMAP_TYPE_GTT &&
+		  !i915_gem_object_has_struct_page(obj) &&
+		  !i915_gem_object_has_iomem(obj));
+	i915_gem_object_unlock(obj);
 
-		obj->mm.placements = &i915->mm.regions[mr->id];
-		obj->mm.n_placements = 1;
-	} else {
-		obj->mm.placements = placements;
-		obj->mm.n_placements = n_placements;
-	}
+	return !no_map;
 }
 
 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
@@ -865,10 +898,10 @@ static int __igt_mmap(struct drm_i915_private *i915,
 		      struct drm_i915_gem_object *obj,
 		      enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	struct vm_area_struct *area;
 	unsigned long addr;
 	int err, i;
+	u64 offset;
 
 	if (!can_mmap(obj, type))
 		return 0;
@@ -879,17 +912,19 @@ static int __igt_mmap(struct drm_i915_private *i915,
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
 	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
 
+	mmap_read_lock(current->mm);
 	area = vma_lookup(current->mm, addr);
+	mmap_read_unlock(current->mm);
 	if (!area) {
 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
 		       obj->mm.region->name);
@@ -897,13 +932,6 @@ static int __igt_mmap(struct drm_i915_private *i915,
 		goto out_unmap;
 	}
 
-	if (area->vm_private_data != mmo) {
-		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
-		       obj->mm.region->name);
-		err = -EINVAL;
-		goto out_unmap;
-	}
-
 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
 		u32 x;
@@ -933,7 +961,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
 	}
 
 	if (type == I915_MMAP_TYPE_GTT)
-		intel_gt_flush_ggtt_writes(&i915->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 
 	err = wc_check(obj);
 	if (err == -ENXIO)
@@ -957,22 +985,25 @@ static int igt_mmap(void *arg)
 		};
 		int i;
 
+		if (mr->private)
+			continue;
+
 		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
 			struct drm_i915_gem_object *obj;
 			int err;
 
-			obj = i915_gem_object_create_region(mr, sizes[i], 0);
+			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
 			if (obj == ERR_PTR(-ENODEV))
 				continue;
 
 			if (IS_ERR(obj))
 				return PTR_ERR(obj);
 
-			object_set_placements(obj, &mr, 1);
-
 			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
 			if (err == 0)
 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+			if (err == 0)
+				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
 
 			i915_gem_object_put(obj);
 			if (err)
@@ -983,6 +1014,394 @@ static int igt_mmap(void *arg)
 	return 0;
 }
 
+static void igt_close_objects(struct drm_i915_private *i915,
+			      struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	list_for_each_entry_safe(obj, on, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
+		if (i915_gem_object_has_pinned_pages(obj))
+			i915_gem_object_unpin_pages(obj);
+		/* No polluting the memory region between tests */
+		__i915_gem_object_put_pages(obj);
+		i915_gem_object_unlock(obj);
+		list_del(&obj->st_link);
+		i915_gem_object_put(obj);
+	}
+
+	cond_resched();
+
+	i915_gem_drain_freed_objects(i915);
+}
+
+static void igt_make_evictable(struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
+		if (i915_gem_object_has_pinned_pages(obj))
+			i915_gem_object_unpin_pages(obj);
+		i915_gem_object_unlock(obj);
+	}
+
+	cond_resched();
+}
+
+static int igt_fill_mappable(struct intel_memory_region *mr,
+			     struct list_head *objects)
+{
+	u64 size, total;
+	int err;
+
+	total = 0;
+	size = resource_size(&mr->io);
+	do {
+		struct drm_i915_gem_object *obj;
+
+		obj = i915_gem_object_create_region(mr, size, 0, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close;
+		}
+
+		list_add(&obj->st_link, objects);
+
+		err = i915_gem_object_pin_pages_unlocked(obj);
+		if (err) {
+			if (err != -ENXIO && err != -ENOMEM)
+				goto err_close;
+
+			if (size == mr->min_page_size) {
+				err = 0;
+				break;
+			}
+
+			size >>= 1;
+			continue;
+		}
+
+		total += obj->base.size;
+	} while (1);
+
+	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
+	return 0;
+
+err_close:
+	igt_close_objects(mr->i915, objects);
+	return err;
+}
+
+static int ___igt_mmap_migrate(struct drm_i915_private *i915,
+			       struct drm_i915_gem_object *obj,
+			       unsigned long addr,
+			       bool unfaultable)
+{
+	int i;
+
+	pr_info("igt_mmap(%s, %d) @ %lx\n",
+		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
+
+	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
+		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
+		u32 x;
+
+		if (get_user(x, ux)) {
+			if (!unfaultable) {
+				pr_err("%s: Unable to read from mmap, offset:%zd\n",
+				       obj->mm.region->name, i * sizeof(x));
+				return -EFAULT;
+			}
+
+			continue;
+		}
+
+		if (unfaultable) {
+			pr_err("%s: Faulted unmappable memory\n",
+			       obj->mm.region->name);
+			return -EINVAL;
+		}
+
+		if (x != expand32(POISON_INUSE)) {
+			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+			       obj->mm.region->name,
+			       i * sizeof(x), x, expand32(POISON_INUSE));
+			return -EINVAL;
+		}
+
+		x = expand32(POISON_FREE);
+		if (put_user(x, ux)) {
+			pr_err("%s: Unable to write to mmap, offset:%zd\n",
+			       obj->mm.region->name, i * sizeof(x));
+			return -EFAULT;
+		}
+	}
+
+	if (unfaultable)
+		return 0;
+
+	obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+	return wc_check(obj);
+}
+
+#define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
+#define IGT_MMAP_MIGRATE_FILL        (1 << 1)
+#define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
+#define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
+#define IGT_MMAP_MIGRATE_FAIL_GPU    (1 << 4)
+static int __igt_mmap_migrate(struct intel_memory_region **placements,
+			      int n_placements,
+			      struct intel_memory_region *expected_mr,
+			      unsigned int flags)
+{
+	struct drm_i915_private *i915 = placements[0]->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_request *rq = NULL;
+	struct vm_area_struct *area;
+	unsigned long addr;
+	LIST_HEAD(objects);
+	u64 offset;
+	int err;
+
+	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
+					    placements,
+					    n_placements);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
+		obj->flags |= I915_BO_ALLOC_GPU_ONLY;
+
+	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
+	if (err)
+		goto out_put;
+
+	/*
+	 * This will eventually create a GEM context, due to opening dummy drm
+	 * file, which needs a tiny amount of mappable device memory for the top
+	 * level paging structures(and perhaps scratch), so make sure we
+	 * allocate early, to avoid tears.
+	 */
+	addr = igt_mmap_offset(i915, offset, obj->base.size,
+			       PROT_WRITE, MAP_SHARED);
+	if (IS_ERR_VALUE(addr)) {
+		err = addr;
+		goto out_put;
+	}
+
+	mmap_read_lock(current->mm);
+	area = vma_lookup(current->mm, addr);
+	mmap_read_unlock(current->mm);
+	if (!area) {
+		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+		goto out_addr;
+	}
+
+	if (flags & IGT_MMAP_MIGRATE_FILL) {
+		err = igt_fill_mappable(placements[0], &objects);
+		if (err)
+			goto out_addr;
+	}
+
+	err = i915_gem_object_lock(obj, NULL);
+	if (err)
+		goto out_addr;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		goto out_addr;
+	}
+
+	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
+					  obj->mm.pages->sgl, obj->pat_index,
+					  i915_gem_object_is_lmem(obj),
+					  expand32(POISON_INUSE), &rq);
+	i915_gem_object_unpin_pages(obj);
+	if (rq && !err) {
+		err = dma_resv_reserve_fences(obj->base.resv, 1);
+		if (!err)
+			dma_resv_add_fence(obj->base.resv, &rq->fence,
+					   DMA_RESV_USAGE_KERNEL);
+		i915_request_put(rq);
+	}
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_addr;
+
+	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
+		igt_make_evictable(&objects);
+
+	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
+		err = i915_gem_object_lock(obj, NULL);
+		if (err)
+			goto out_addr;
+
+		/*
+		 * Ensure we only simulate the gpu failure when faulting the
+		 * pages.
+		 */
+		err = i915_gem_object_wait_moving_fence(obj, true);
+		i915_gem_object_unlock(obj);
+		if (err)
+			goto out_addr;
+		i915_ttm_migrate_set_failure_modes(true, false);
+	}
+
+	err = ___igt_mmap_migrate(i915, obj, addr,
+				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
+
+	if (!err && obj->mm.region != expected_mr) {
+		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
+		err = -EINVAL;
+	}
+
+	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
+		struct intel_gt *gt;
+		unsigned int id;
+
+		i915_ttm_migrate_set_failure_modes(false, false);
+
+		for_each_gt(gt, i915, id) {
+			intel_wakeref_t wakeref;
+			bool wedged;
+
+			mutex_lock(&gt->reset.mutex);
+			wedged = test_bit(I915_WEDGED, &gt->reset.flags);
+			mutex_unlock(&gt->reset.mutex);
+			if (!wedged) {
+				pr_err("gt(%u) not wedged\n", id);
+				err = -EINVAL;
+				continue;
+			}
+
+			wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+			igt_global_reset_lock(gt);
+			intel_gt_reset(gt, ALL_ENGINES, NULL);
+			igt_global_reset_unlock(gt);
+			intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+		}
+
+		if (!i915_gem_object_has_unknown_state(obj)) {
+			pr_err("object missing unknown_state\n");
+			err = -EINVAL;
+		}
+	}
+
+out_addr:
+	vm_munmap(addr, obj->base.size);
+
+out_put:
+	i915_gem_object_put(obj);
+	igt_close_objects(i915, &objects);
+	return err;
+}
+
+static int igt_mmap_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+
+	for_each_memory_region(mr, i915, id) {
+		struct intel_memory_region *mixed[] = { mr, system };
+		struct intel_memory_region *single[] = { mr };
+		struct ttm_resource_manager *man = mr->region_private;
+		struct resource saved_io;
+		int err;
+
+		if (mr->private)
+			continue;
+
+		if (!resource_size(&mr->io))
+			continue;
+
+		/*
+		 * For testing purposes let's force small BAR, if not already
+		 * present.
+		 */
+		saved_io = mr->io;
+		if (resource_size(&mr->io) == mr->total) {
+			resource_size_t io_size = resource_size(&mr->io);
+
+			io_size = rounddown_pow_of_two(io_size >> 1);
+			if (io_size < PAGE_SIZE)
+				continue;
+
+			mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
+			i915_ttm_buddy_man_force_visible_size(man,
+							      io_size >> PAGE_SHIFT);
+		}
+
+		/*
+		 * Allocate in the mappable portion, should be no surprises here.
+		 */
+		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
+		if (err)
+			goto out_io_size;
+
+		/*
+		 * Allocate in the non-mappable portion, but force migrating to
+		 * the mappable portion on fault (LMEM -> LMEM)
+		 */
+		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
+					 IGT_MMAP_MIGRATE_TOPDOWN |
+					 IGT_MMAP_MIGRATE_FILL |
+					 IGT_MMAP_MIGRATE_EVICTABLE);
+		if (err)
+			goto out_io_size;
+
+		/*
+		 * Allocate in the non-mappable portion, but force spilling into
+		 * system memory on fault (LMEM -> SMEM)
+		 */
+		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
+					 IGT_MMAP_MIGRATE_TOPDOWN |
+					 IGT_MMAP_MIGRATE_FILL);
+		if (err)
+			goto out_io_size;
+
+		/*
+		 * Allocate in the non-mappable portion, but since the mappable
+		 * portion is already full, and we can't spill to system memory,
+		 * then we should expect the fault to fail.
+		 */
+		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
+					 IGT_MMAP_MIGRATE_TOPDOWN |
+					 IGT_MMAP_MIGRATE_FILL |
+					 IGT_MMAP_MIGRATE_UNFAULTABLE);
+		if (err)
+			goto out_io_size;
+
+		/*
+		 * Allocate in the non-mappable portion, but force migrating to
+		 * the mappable portion on fault (LMEM -> LMEM). We then also
+		 * simulate a gpu error when moving the pages when faulting the
+		 * pages, which should result in wedging the gpu and returning
+		 * SIGBUS in the fault handler, since we can't fallback to
+		 * memcpy.
+		 */
+		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
+					 IGT_MMAP_MIGRATE_TOPDOWN |
+					 IGT_MMAP_MIGRATE_FILL |
+					 IGT_MMAP_MIGRATE_EVICTABLE |
+					 IGT_MMAP_MIGRATE_FAIL_GPU |
+					 IGT_MMAP_MIGRATE_UNFAULTABLE);
+out_io_size:
+		mr->io = saved_io;
+		i915_ttm_buddy_man_force_visible_size(man,
+						      resource_size(&mr->io) >> PAGE_SHIFT);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static const char *repr_mmap_type(enum i915_mmap_type type)
 {
 	switch (type) {
@@ -990,26 +1409,33 @@ static const char *repr_mmap_type(enum i915_mmap_type type)
 	case I915_MMAP_TYPE_WB: return "wb";
 	case I915_MMAP_TYPE_WC: return "wc";
 	case I915_MMAP_TYPE_UC: return "uc";
+	case I915_MMAP_TYPE_FIXED: return "fixed";
 	default: return "unknown";
 	}
 }
 
-static bool can_access(const struct drm_i915_gem_object *obj)
+static bool can_access(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_has_struct_page(obj) ||
-	       i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+	bool access;
+
+	i915_gem_object_lock(obj, NULL);
+	access = i915_gem_object_has_struct_page(obj) ||
+		 i915_gem_object_has_iomem(obj);
+	i915_gem_object_unlock(obj);
+
+	return access;
 }
 
 static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	unsigned long __user *ptr;
 	unsigned long A, B;
 	unsigned long x, y;
 	unsigned long addr;
 	int err;
+	u64 offset;
 
 	memset(&A, 0xAA, sizeof(A));
 	memset(&B, 0xBB, sizeof(B));
@@ -1017,11 +1443,11 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 	if (!can_mmap(obj, type) || !can_access(obj))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 	ptr = (unsigned long __user *)addr;
@@ -1033,7 +1459,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 		goto out_unmap;
 	}
 
-	intel_gt_flush_ggtt_writes(&i915->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 
 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
 	if (err != sizeof(x)) {
@@ -1049,7 +1475,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 		goto out_unmap;
 	}
 
-	intel_gt_flush_ggtt_writes(&i915->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 
 	err = __get_user(y, ptr);
 	if (err) {
@@ -1081,15 +1507,16 @@ static int igt_mmap_access(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		if (mr->private)
+			continue;
+
+		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		object_set_placements(obj, &mr, 1);
-
 		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
 		if (err == 0)
 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
@@ -1097,6 +1524,8 @@ static int igt_mmap_access(void *arg)
 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
 		if (err == 0)
 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
+		if (err == 0)
+			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
 
 		i915_gem_object_put(obj);
 		if (err)
@@ -1111,11 +1540,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			  struct drm_i915_gem_object *obj,
 			  enum i915_mmap_type type)
 {
 	struct intel_engine_cs *engine;
-	struct i915_mmap_offset *mmo;
 	unsigned long addr;
 	u32 __user *ux;
 	u32 bbe;
 	int err;
+	u64 offset;
 	/*
 	 * Verify that the mmap access into the backing store aligns with
@@ -1132,11 +1561,11 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
@@ -1149,7 +1578,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	}
 
 	if (type == I915_MMAP_TYPE_GTT)
-		intel_gt_flush_ggtt_writes(&i915->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
@@ -1176,11 +1605,9 @@ retry:
 			goto out_unpin;
 		}
 
-		err = i915_request_await_object(rq, vma->obj, false);
-		if (err == 0)
-			err = i915_vma_move_to_active(vma, rq, 0);
+		err = i915_vma_move_to_active(vma, rq, 0);
 
-		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+		err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
 		i915_request_get(rq);
 		i915_request_add(rq);
 
@@ -1226,18 +1653,21 @@ static int igt_mmap_gpu(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		if (mr->private)
+			continue;
+
+		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		object_set_placements(obj, &mr, 1);
-
 		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
 		if (err == 0)
 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+		if (err == 0)
+			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
 
 		i915_gem_object_put(obj);
 		if (err)
@@ -1249,7 +1679,9 @@
 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
 {
-	if (!pte_present(*pte) || pte_none(*pte)) {
+	pte_t ptent = ptep_get(pte);
+
+	if (!pte_present(ptent) || pte_none(ptent)) {
 		pr_err("missing PTE:%lx\n",
 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
 		return -EINVAL;
@@ -1260,7 +1692,9 @@
 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
 {
-	if (pte_present(*pte) && !pte_none(*pte)) {
+	pte_t ptent = ptep_get(pte);
+
+	if (pte_present(ptent) && !pte_none(ptent)) {
 		pr_err("present PTE:%lx; expected to be revoked\n",
 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
 		return -EINVAL;
@@ -1303,18 +1737,18 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 			     struct drm_i915_gem_object *obj,
 			     enum i915_mmap_type type)
 {
-	struct i915_mmap_offset *mmo;
 	unsigned long addr;
 	int err;
+	u64 offset;
 
 	if (!can_mmap(obj, type))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
-	if (IS_ERR(mmo))
-		return PTR_ERR(mmo);
+	err = __assign_mmap_offset(obj, type, &offset, NULL);
+	if (err)
+		return err;
 
-	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
@@ -1333,7 +1767,9 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 	 * for other objects. Ergo we have to revoke the previous mmap PTE
 	 * access as it no longer points to the same object.
 	 */
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+	i915_gem_object_unlock(obj);
 	if (err) {
 		pr_err("Failed to unbind object!\n");
 		goto out_unmap;
@@ -1371,18 +1807,21 @@ static int igt_mmap_revoke(void *arg)
 		struct drm_i915_gem_object *obj;
 		int err;
 
-		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
+		if (mr->private)
+			continue;
+
+		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
 		if (obj == ERR_PTR(-ENODEV))
 			continue;
 
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		object_set_placements(obj, &mr, 1);
-
 		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
 		if (err == 0)
 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
 
 		i915_gem_object_put(obj);
 		if (err)
@@ -1394,15 +1833,28 @@
 
 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 {
+	int ret;
+	bool unuse_mm = false;
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_partial_tiling),
 		SUBTEST(igt_smoke_tiling),
 		SUBTEST(igt_mmap_offset_exhaustion),
 		SUBTEST(igt_mmap),
+		SUBTEST(igt_mmap_migrate),
 		SUBTEST(igt_mmap_access),
 		SUBTEST(igt_mmap_revoke),
 		SUBTEST(igt_mmap_gpu),
 	};
 
-	return i915_subtests(tests, i915);
+	if (!current->mm) {
+		kthread_use_mm(current->active_mm);
+		unuse_mm = true;
+	}
+
+	ret = i915_live_subtests(tests, i915);
+
+	if (unuse_mm)
+		kthread_unuse_mm(current->active_mm);
+
+	return ret;
 }
