Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 785
1 file changed, 326 insertions(+), 459 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index f843a5040706..0a86e4857539 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -26,12 +26,17 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" +#include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_region.h" #include "gem/selftests/mock_context.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_gtt.h" #include "i915_random.h" #include "i915_selftest.h" +#include "i915_vma_resource.h" #include "mock_drm.h" #include "mock_gem_device.h" @@ -56,7 +61,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) #define PFN_BIAS 0x1000 struct sg_table *pages; struct scatterlist *sg; - unsigned int sg_page_sizes; typeof(obj->base.size) rem; pages = kmalloc(sizeof(*pages), GFP); @@ -64,12 +68,17 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) return -ENOMEM; rem = round_up(obj->base.size, BIT(31)) >> 31; + /* restricted by sg_alloc_table */ + if (overflows_type(rem, unsigned int)) { + kfree(pages); + return -E2BIG; + } + if (sg_alloc_table(pages, rem, GFP)) { kfree(pages); return -ENOMEM; } - sg_page_sizes = 0; rem = obj->base.size; for (sg = pages->sgl; sg; sg = sg_next(sg)) { unsigned long len = min_t(typeof(rem), rem, BIT(31)); @@ -78,13 +87,12 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0); sg_dma_address(sg) = page_to_phys(sg_page(sg)); sg_dma_len(sg) = len; - sg_page_sizes |= len; rem -= len; } GEM_BUG_ON(rem); - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); + __i915_gem_object_set_pages(obj, pages); return 0; #undef GFP @@ -127,7 +135,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; + obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE); /* Preallocate the "backing storage" */ if (i915_gem_object_pin_pages_unlocked(obj)) @@ -155,12 +163,12 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = i915_ppgtt_create(&dev_priv->gt); + ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); if (!ppgtt->vm.allocate_va_range) - goto err_ppgtt_cleanup; + goto ppgtt_vm_put; /* * While we only allocate the page tables here and so we could @@ -228,7 +236,7 @@ err_ppgtt_cleanup: goto retry; } i915_gem_ww_ctx_fini(&ww); - +ppgtt_vm_put: i915_vm_put(&ppgtt->vm); return err; } @@ -237,12 +245,14 @@ static int lowlevel_hole(struct i915_address_space *vm, u64 hole_start, u64 hole_end, unsigned long end_time) { + const unsigned int min_alignment = + i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); I915_RND_STATE(seed_prng); - struct i915_vma *mock_vma; + struct i915_vma_resource *mock_vma_res; unsigned int size; - mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL); - if (!mock_vma) + mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL); + if (!mock_vma_res) return -ENOMEM; /* Keep creating larger objects until one cannot fit into the hole */ @@ -250,9 +260,10 @@ static int lowlevel_hole(struct i915_address_space *vm, I915_RND_SUBSTATE(prng, seed_prng); struct drm_i915_gem_object *obj; unsigned int *order, count, n; - u64 hole_size; + u64 hole_size, aligned_size; - hole_size = (hole_end - hole_start) >> size; + aligned_size = max_t(u32, 
ilog2(min_alignment), size); + hole_size = (hole_end - hole_start) >> aligned_size; if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) hole_size = KMALLOC_MAX_SIZE / sizeof(u32); count = hole_size >> 1; @@ -268,15 +279,16 @@ static int lowlevel_hole(struct i915_address_space *vm, break; } while (count >>= 1); if (!count) { - kfree(mock_vma); + kfree(mock_vma_res); return -ENOMEM; } GEM_BUG_ON(!order); - GEM_BUG_ON(count * BIT_ULL(size) > vm->total); - GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end); + GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total); + GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end); - /* Ignore allocation failures (i.e. don't report them as + /* + * Ignore allocation failures (i.e. don't report them as * a test failure) as we are purposefully allocating very * large objects without checking that we have sufficient * memory. We expect to hit -ENOMEM. @@ -297,10 +309,10 @@ static int lowlevel_hole(struct i915_address_space *vm, } for (n = 0; n < count; n++) { - u64 addr = hole_start + order[n] * BIT_ULL(size); + u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); intel_wakeref_t wakeref; - GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); + GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total); if (igt_timeout(end_time, "%s timed out before %d/%d\n", @@ -342,19 +354,21 @@ alloc_vm_end: break; } - mock_vma->pages = obj->mm.pages; - mock_vma->node.size = BIT_ULL(size); - mock_vma->node.start = addr; + mock_vma_res->bi.pages = obj->mm.pages; + mock_vma_res->node_size = BIT_ULL(aligned_size); + mock_vma_res->start = addr; with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref) - vm->insert_entries(vm, mock_vma, - I915_CACHE_NONE, 0); + vm->insert_entries(vm, mock_vma_res, + i915_gem_get_pat_index(vm->i915, + I915_CACHE_NONE), + 0); } count = n; i915_random_reorder(order, count, &prng); for (n = 0; n < count; n++) { - u64 addr = hole_start + order[n] * BIT_ULL(size); + u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); @@ -370,7 +384,7 @@ alloc_vm_end: cleanup_freed_objects(vm->i915); } - kfree(mock_vma); + kfree(mock_vma_res); return 0; } @@ -378,14 +392,14 @@ static void close_object_list(struct list_head *objects, struct i915_address_space *vm) { struct drm_i915_gem_object *obj, *on; - int ignored; + int __maybe_unused ignored; list_for_each_entry_safe(obj, on, objects, st_link) { struct i915_vma *vma; vma = i915_vma_instance(obj, vm, NULL); if (!IS_ERR(vma)) - ignored = i915_vma_unbind(vma); + ignored = i915_vma_unbind_unlocked(vma); list_del(&obj->st_link); i915_gem_object_put(obj); @@ -398,8 +412,10 @@ static int fill_hole(struct i915_address_space *vm, { const u64 hole_size = hole_end - hole_start; struct drm_i915_gem_object *obj; + const unsigned int min_alignment = + i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); const unsigned long max_pages = - min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT); + min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment)); const unsigned long max_step = max(int_sqrt(max_pages), 2UL); unsigned long npages, prime, flags; struct i915_vma *vma; @@ -431,7 +447,8 @@ static int fill_hole(struct i915_address_space *vm, list_add(&obj->st_link, &objects); - /* Align differing sized objects against the edges, and + /* + * Align differing sized objects against the edges, and * check we don't walk off into the void when binding * them into the GTT. 
*/ @@ -440,14 +457,17 @@ static int fill_hole(struct i915_address_space *vm, offset = p->offset; list_for_each_entry(obj, &objects, st_link) { + u64 aligned_size = round_up(obj->base.size, + min_alignment); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) continue; if (p->step < 0) { - if (offset < hole_start + obj->base.size) + if (offset < hole_start + aligned_size) break; - offset -= obj->base.size; + offset -= aligned_size; } err = i915_vma_pin(vma, 0, 0, offset | flags); @@ -469,22 +489,25 @@ static int fill_hole(struct i915_address_space *vm, i915_vma_unpin(vma); if (p->step > 0) { - if (offset + obj->base.size > hole_end) + if (offset + aligned_size > hole_end) break; - offset += obj->base.size; + offset += aligned_size; } } offset = p->offset; list_for_each_entry(obj, &objects, st_link) { + u64 aligned_size = round_up(obj->base.size, + min_alignment); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) continue; if (p->step < 0) { - if (offset < hole_start + obj->base.size) + if (offset < hole_start + aligned_size) break; - offset -= obj->base.size; + offset -= aligned_size; } if (!drm_mm_node_allocated(&vma->node) || @@ -496,7 +519,7 @@ static int fill_hole(struct i915_address_space *vm, goto err; } - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); if (err) { pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n", __func__, p->name, vma->node.start, vma->node.size, @@ -505,22 +528,25 @@ static int fill_hole(struct i915_address_space *vm, } if (p->step > 0) { - if (offset + obj->base.size > hole_end) + if (offset + aligned_size > hole_end) break; - offset += obj->base.size; + offset += aligned_size; } } offset = p->offset; list_for_each_entry_reverse(obj, &objects, st_link) { + u64 aligned_size = round_up(obj->base.size, + min_alignment); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) continue; if (p->step < 0) { - if (offset < hole_start + obj->base.size) + if (offset < hole_start + aligned_size) break; - offset -= obj->base.size; + offset -= aligned_size; } err = i915_vma_pin(vma, 0, 0, offset | flags); @@ -542,22 +568,25 @@ static int fill_hole(struct i915_address_space *vm, i915_vma_unpin(vma); if (p->step > 0) { - if (offset + obj->base.size > hole_end) + if (offset + aligned_size > hole_end) break; - offset += obj->base.size; + offset += aligned_size; } } offset = p->offset; list_for_each_entry_reverse(obj, &objects, st_link) { + u64 aligned_size = round_up(obj->base.size, + min_alignment); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) continue; if (p->step < 0) { - if (offset < hole_start + obj->base.size) + if (offset < hole_start + aligned_size) break; - offset -= obj->base.size; + offset -= aligned_size; } if (!drm_mm_node_allocated(&vma->node) || @@ -569,7 +598,7 @@ static int fill_hole(struct i915_address_space *vm, goto err; } - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); if (err) { pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n", __func__, p->name, vma->node.start, vma->node.size, @@ -578,9 +607,9 @@ static int fill_hole(struct i915_address_space *vm, } if (p->step > 0) { - if (offset + obj->base.size > hole_end) + if (offset + aligned_size > hole_end) break; - offset += obj->base.size; + offset += aligned_size; } } } @@ -610,6 +639,7 @@ static int walk_hole(struct i915_address_space *vm, const u64 hole_size = hole_end - hole_start; const unsigned long max_pages = min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT); + unsigned long 
min_alignment; unsigned long flags; u64 size; @@ -619,6 +649,8 @@ static int walk_hole(struct i915_address_space *vm, if (i915_is_ggtt(vm)) flags |= PIN_GLOBAL; + min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); + for_each_prime_number_from(size, 1, max_pages) { struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -637,7 +669,7 @@ static int walk_hole(struct i915_address_space *vm, for (addr = hole_start; addr + obj->base.size < hole_end; - addr += obj->base.size) { + addr += round_up(obj->base.size, min_alignment)) { err = i915_vma_pin(vma, 0, 0, addr | flags); if (err) { pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n", @@ -655,7 +687,7 @@ static int walk_hole(struct i915_address_space *vm, goto err_put; } - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); if (err) { pr_err("%s unbind failed at %llx + %llx with err=%d\n", __func__, addr, vma->size, err); @@ -689,6 +721,7 @@ static int pot_hole(struct i915_address_space *vm, { struct drm_i915_gem_object *obj; struct i915_vma *vma; + unsigned int min_alignment; unsigned long flags; unsigned int pot; int err = 0; @@ -697,6 +730,8 @@ static int pot_hole(struct i915_address_space *vm, if (i915_is_ggtt(vm)) flags |= PIN_GLOBAL; + min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); + obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -709,13 +744,13 @@ static int pot_hole(struct i915_address_space *vm, /* Insert a pair of pages across every pot boundary within the hole */ for (pot = fls64(hole_end - 1) - 1; - pot > ilog2(2 * I915_GTT_PAGE_SIZE); + pot > ilog2(2 * min_alignment); pot--) { u64 step = BIT_ULL(pot); u64 addr; - for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE; - addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE; + for (addr = round_up(hole_start + min_alignment, step) - min_alignment; + hole_end > addr && hole_end - addr >= 2 * min_alignment; addr += step) { err = i915_vma_pin(vma, 0, 0, addr | flags); if (err) { @@ -732,13 +767,13 @@ static int pot_hole(struct i915_address_space *vm, pr_err("%s incorrect at %llx + %llx\n", __func__, addr, vma->size); i915_vma_unpin(vma); - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); err = -EINVAL; goto err_obj; } i915_vma_unpin(vma); - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); GEM_BUG_ON(err); } @@ -760,6 +795,7 @@ static int drunk_hole(struct i915_address_space *vm, unsigned long end_time) { I915_RND_STATE(prng); + unsigned int min_alignment; unsigned int size; unsigned long flags; @@ -767,15 +803,18 @@ static int drunk_hole(struct i915_address_space *vm, if (i915_is_ggtt(vm)) flags |= PIN_GLOBAL; + min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); + /* Keep creating larger objects until one cannot fit into the hole */ for (size = 12; (hole_end - hole_start) >> size; size++) { struct drm_i915_gem_object *obj; unsigned int *order, count, n; struct i915_vma *vma; - u64 hole_size; + u64 hole_size, aligned_size; int err = -ENODEV; - hole_size = (hole_end - hole_start) >> size; + aligned_size = max_t(u32, ilog2(min_alignment), size); + hole_size = (hole_end - hole_start) >> aligned_size; if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32)) hole_size = KMALLOC_MAX_SIZE / sizeof(u32); count = hole_size >> 1; @@ -794,7 +833,8 @@ static int drunk_hole(struct i915_address_space *vm, return -ENOMEM; GEM_BUG_ON(!order); - /* Ignore allocation failures (i.e. 
don't report them as + /* + * Ignore allocation failures (i.e. don't report them as * a test failure) as we are purposefully allocating very * large objects without checking that we have sufficient * memory. We expect to hit -ENOMEM. @@ -815,7 +855,7 @@ static int drunk_hole(struct i915_address_space *vm, GEM_BUG_ON(vma->size != BIT_ULL(size)); for (n = 0; n < count; n++) { - u64 addr = hole_start + order[n] * BIT_ULL(size); + u64 addr = hole_start + order[n] * BIT_ULL(aligned_size); err = i915_vma_pin(vma, 0, 0, addr | flags); if (err) { @@ -832,13 +872,13 @@ static int drunk_hole(struct i915_address_space *vm, pr_err("%s incorrect at %llx + %llx\n", __func__, addr, BIT_ULL(size)); i915_vma_unpin(vma); - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); err = -EINVAL; goto err_obj; } i915_vma_unpin(vma); - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); GEM_BUG_ON(err); if (igt_timeout(end_time, @@ -867,11 +907,14 @@ static int __shrink_hole(struct i915_address_space *vm, { struct drm_i915_gem_object *obj; unsigned long flags = PIN_OFFSET_FIXED | PIN_USER; + unsigned int min_alignment; unsigned int order = 12; LIST_HEAD(objects); int err = 0; u64 addr; + min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM); + /* Keep creating larger objects until one cannot fit into the hole */ for (addr = hole_start; addr < hole_end; ) { struct i915_vma *vma; @@ -906,13 +949,13 @@ static int __shrink_hole(struct i915_address_space *vm, pr_err("%s incorrect at %llx + %llx\n", __func__, addr, size); i915_vma_unpin(vma); - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); err = -EINVAL; break; } i915_vma_unpin(vma); - addr += size; + addr += round_up(size, min_alignment); /* * Since we are injecting allocation faults at random intervals, @@ -924,7 +967,7 @@ static int __shrink_hole(struct i915_address_space *vm, break; if (igt_timeout(end_time, - "%s timed out at ofset %llx [%llx - %llx]\n", + "%s timed out at offset %llx [%llx - %llx]\n", __func__, addr, hole_start, hole_end)) { err = -EINTR; break; @@ -971,7 +1014,7 @@ static int shrink_boom(struct i915_address_space *vm, /* * Catch the case which shrink_hole seems to miss. The setup here * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while - * ensuring that all vma assiocated with the respective pd/pdp are + * ensuring that all vma associated with the respective pd/pdp are * unpinned at the time. 
*/ @@ -1036,6 +1079,121 @@ err_purge: return err; } +static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr, + u64 addr, u64 size, unsigned long flags) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err = 0; + u64 expected_vma_size, expected_node_size; + bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM || + mr->type == INTEL_MEMORY_STOLEN_LOCAL; + + obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY); + if (IS_ERR(obj)) { + /* if iGVT-g or DMAR is active, stolen mem will be uninitialized */ + if (PTR_ERR(obj) == -ENODEV && is_stolen) + return 0; + return PTR_ERR(obj); + } + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_put; + } + + err = i915_vma_pin(vma, 0, 0, addr | flags); + if (err) + goto err_put; + i915_vma_unpin(vma); + + if (!drm_mm_node_allocated(&vma->node)) { + err = -EINVAL; + goto err_put; + } + + if (i915_vma_misplaced(vma, 0, 0, addr | flags)) { + err = -EINVAL; + goto err_put; + } + + /* make sure page_sizes_gtt has been populated before use */ + if (i915_is_ggtt(vm) && intel_vm_no_concurrent_access_wa(vm->i915)) + i915_vma_wait_for_bind(vma); + + expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1)); + expected_node_size = expected_vma_size; + + if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) { + expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K); + expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K); + } + + if (vma->size != expected_vma_size || vma->node.size != expected_node_size) { + err = i915_vma_unbind_unlocked(vma); + err = -EBADSLT; + goto err_put; + } + + err = i915_vma_unbind_unlocked(vma); + if (err) + goto err_put; + + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + +err_put: + i915_gem_object_put(obj); + cleanup_freed_objects(vm->i915); + return err; +} + +static int misaligned_pin(struct i915_address_space *vm, + u64 hole_start, u64 hole_end, + unsigned long end_time) +{ + struct intel_memory_region *mr; + enum intel_region_id id; + unsigned long flags = PIN_OFFSET_FIXED | PIN_USER; + int err = 0; + u64 hole_size = hole_end - hole_start; + + if (i915_is_ggtt(vm)) + flags |= PIN_GLOBAL; + + for_each_memory_region(mr, vm->i915, id) { + u64 min_alignment = i915_vm_min_alignment(vm, mr->type); + u64 size = min_alignment; + u64 addr = round_down(hole_start + (hole_size / 2), min_alignment); + + /* avoid -ENOSPC on very small hole setups */ + if (hole_size < 3 * min_alignment) + continue; + + /* we can't test < 4k alignment due to flags being encoded in lower bits */ + if (min_alignment != I915_GTT_PAGE_SIZE_4K) { + err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags); + /* misaligned should error with -EINVAL*/ + if (!err) + err = -EBADSLT; + if (err != -EINVAL) + return err; + } + + /* test for vma->size expansion to min page size */ + err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags); + if (err) + return err; + + /* test for intermediate size not expanding vma->size for large alignments */ + err = misaligned_case(vm, mr, addr, size / 2, flags); + if (err) + return err; + } + + return 0; +} + static int exercise_ppgtt(struct drm_i915_private *dev_priv, int (*func)(struct i915_address_space *vm, u64 hole_start, u64 hole_end, @@ -1053,13 +1211,13 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - ppgtt = i915_ppgtt_create(&dev_priv->gt); + ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if 
(IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_free; } GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); - GEM_BUG_ON(!atomic_read(&ppgtt->vm.open)); + assert_vm_alive(&ppgtt->vm); err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time); @@ -1105,6 +1263,11 @@ static int igt_ppgtt_shrink_boom(void *arg) return exercise_ppgtt(arg, shrink_boom); } +static int igt_ppgtt_misaligned_pin(void *arg) +{ + return exercise_ppgtt(arg, misaligned_pin); +} + static int sort_holes(void *priv, const struct list_head *A, const struct list_head *B) { @@ -1122,7 +1285,7 @@ static int exercise_ggtt(struct drm_i915_private *i915, u64 hole_start, u64 hole_end, unsigned long end_time)) { - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; u64 hole_start, hole_end, last = 0; struct drm_mm_node *node; IGT_TIMEOUT(end_time); @@ -1177,12 +1340,17 @@ static int igt_ggtt_lowlevel(void *arg) return exercise_ggtt(arg, lowlevel_hole); } +static int igt_ggtt_misaligned_pin(void *arg) +{ + return exercise_ggtt(arg, misaligned_pin); +} + static int igt_ggtt_page(void *arg) { const unsigned int count = PAGE_SIZE/sizeof(u32); I915_RND_STATE(prng); struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; struct drm_i915_gem_object *obj; intel_wakeref_t wakeref; struct drm_mm_node tmp; @@ -1218,7 +1386,10 @@ static int igt_ggtt_page(void *arg) ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, 0), - offset, I915_CACHE_NONE, 0); + offset, + i915_gem_get_pat_index(i915, + I915_CACHE_NONE), + 0); } order = i915_random_order(count, &prng); @@ -1275,13 +1446,14 @@ static void track_vma_bind(struct i915_vma *vma) __i915_gem_object_pin_pages(obj); - GEM_BUG_ON(vma->pages); + GEM_BUG_ON(atomic_read(&vma->pages_count)); atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE); __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; + vma->resource->bi.pages = vma->pages; mutex_lock(&vma->vm->mutex); - list_add_tail(&vma->vm_link, &vma->vm->bound_list); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); mutex_unlock(&vma->vm->mutex); } @@ -1300,7 +1472,7 @@ static int exercise_mock(struct drm_i915_private *i915, if (!ctx) return -ENOMEM; - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); err = func(vm, 0, min(vm->total, limit), end_time); i915_vm_put(vm); @@ -1336,6 +1508,33 @@ static int igt_mock_drunk(void *arg) return exercise_mock(ggtt->vm.i915, drunk_hole); } +static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset) +{ + struct i915_address_space *vm = vma->vm; + struct i915_vma_resource *vma_res; + struct drm_i915_gem_object *obj = vma->obj; + int err; + + vma_res = i915_vma_resource_alloc(); + if (IS_ERR(vma_res)) + return PTR_ERR(vma_res); + + mutex_lock(&vm->mutex); + err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size, + offset, + obj->pat_index, + 0); + if (!err) { + i915_vma_resource_init_from_vma(vma_res, vma); + vma->resource = vma_res; + } else { + kfree(vma_res); + } + mutex_unlock(&vm->mutex); + + return err; +} + static int igt_gtt_reserve(void *arg) { struct i915_ggtt *ggtt = arg; @@ -1345,9 +1544,10 @@ static int igt_gtt_reserve(void *arg) u64 total; int err = -ENODEV; - /* i915_gem_gtt_reserve() tries to reserve the precise range + /* + * i915_gem_gtt_reserve() tries to reserve the precise range * for the node, and evicts if it has to. So our test checks that - * it can give us the requsted space and prevent overlaps. 
+ * it can give us the requested space and prevent overlaps. */ /* Start by filling the GGTT */ @@ -1370,20 +1570,13 @@ static int igt_gtt_reserve(void *arg) } list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, - obj->base.size, - total, - obj->cache_level, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = reserve_gtt_with_resource(vma, total); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1429,13 +1622,7 @@ static int igt_gtt_reserve(void *arg) goto out; } - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, - obj->base.size, - total, - obj->cache_level, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = reserve_gtt_with_resource(vma, total); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1465,7 +1652,7 @@ static int igt_gtt_reserve(void *arg) goto out; } - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); if (err) { pr_err("i915_vma_unbind failed with err=%d!\n", err); goto out; @@ -1476,13 +1663,7 @@ static int igt_gtt_reserve(void *arg) 2 * I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT); - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, - obj->base.size, - offset, - obj->cache_level, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = reserve_gtt_with_resource(vma, offset); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1509,6 +1690,31 @@ out: return err; } +static int insert_gtt_with_resource(struct i915_vma *vma) +{ + struct i915_address_space *vm = vma->vm; + struct i915_vma_resource *vma_res; + struct drm_i915_gem_object *obj = vma->obj; + int err; + + vma_res = i915_vma_resource_alloc(); + if (IS_ERR(vma_res)) + return PTR_ERR(vma_res); + + mutex_lock(&vm->mutex); + err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0, + obj->pat_index, 0, vm->total, 0); + if (!err) { + i915_vma_resource_init_from_vma(vma_res, vma); + vma->resource = vma_res; + } else { + kfree(vma_res); + } + mutex_unlock(&vm->mutex); + + return err; +} + static int igt_gtt_insert(void *arg) { struct i915_ggtt *ggtt = arg; @@ -1545,14 +1751,15 @@ static int igt_gtt_insert(void *arg) u64 total; int err = -ENODEV; - /* i915_gem_gtt_insert() tries to allocate some free space in the GTT + /* + * i915_gem_gtt_insert() tries to allocate some free space in the GTT * to the node, evicting if required. 
*/ /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_insert(&ggtt->vm, &tmp, + err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, @@ -1593,12 +1800,7 @@ static int igt_gtt_insert(void *arg) goto out; } - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, - obj->base.size, 0, obj->cache_level, - 0, ggtt->vm.total, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = insert_gtt_with_resource(vma); if (err == -ENOSPC) { /* maxed out the GGTT space */ i915_gem_object_put(obj); @@ -1647,18 +1849,13 @@ static int igt_gtt_insert(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); offset = vma->node.start; - err = i915_vma_unbind(vma); + err = i915_vma_unbind_unlocked(vma); if (err) { pr_err("i915_vma_unbind failed with err=%d!\n", err); goto out; } - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, - obj->base.size, 0, obj->cache_level, - 0, ggtt->vm.total, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = insert_gtt_with_resource(vma); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1702,12 +1899,7 @@ static int igt_gtt_insert(void *arg) goto out; } - mutex_lock(&ggtt->vm.mutex); - err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, - obj->base.size, 0, obj->cache_level, - 0, ggtt->vm.total, - 0); - mutex_unlock(&ggtt->vm.mutex); + err = insert_gtt_with_resource(vma); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1737,359 +1929,33 @@ int i915_gem_gtt_mock_selftests(void) SUBTEST(igt_gtt_insert), }; struct drm_i915_private *i915; - struct i915_ggtt *ggtt; + struct intel_gt *gt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; - ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL); - if (!ggtt) { - err = -ENOMEM; + /* allocate the ggtt */ + err = intel_gt_assign_ggtt(to_gt(i915)); + if (err) goto out_put; - } - mock_init_ggtt(i915, ggtt); - err = i915_subtests(tests, ggtt); + gt = to_gt(i915); + + mock_init_ggtt(gt); + + err = i915_subtests(tests, gt->ggtt); mock_device_flush(i915); i915_gem_drain_freed_objects(i915); - mock_fini_ggtt(ggtt); - kfree(ggtt); + mock_fini_ggtt(gt->ggtt); + out_put: mock_destroy_device(i915); return err; } -static int context_sync(struct intel_context *ce) -{ - struct i915_request *rq; - long timeout; - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - i915_request_get(rq); - i915_request_add(rq); - - timeout = i915_request_wait(rq, 0, HZ / 5); - i915_request_put(rq); - - return timeout < 0 ? -EIO : 0; -} - -static struct i915_request * -submit_batch(struct intel_context *ce, u64 addr) -{ - struct i915_request *rq; - int err; - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) - return rq; - - err = 0; - if (rq->engine->emit_init_breadcrumb) /* detect a hang */ - err = rq->engine->emit_init_breadcrumb(rq); - if (err == 0) - err = rq->engine->emit_bb_start(rq, addr, 0, 0); - - if (err == 0) - i915_request_get(rq); - i915_request_add(rq); - - return err ? 
ERR_PTR(err) : rq; -} - -static u32 *spinner(u32 *batch, int i) -{ - return batch + i * 64 / sizeof(*batch) + 4; -} - -static void end_spin(u32 *batch, int i) -{ - *spinner(batch, i) = MI_BATCH_BUFFER_END; - wmb(); -} - -static int igt_cs_tlb(void *arg) -{ - const unsigned int count = PAGE_SIZE / 64; - const unsigned int chunk_size = count * PAGE_SIZE; - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *bbe, *act, *out; - struct i915_gem_engines_iter it; - struct i915_address_space *vm; - struct i915_gem_context *ctx; - struct intel_context *ce; - struct i915_vma *vma; - I915_RND_STATE(prng); - struct file *file; - unsigned int i; - u32 *result; - u32 *batch; - int err = 0; - - /* - * Our mission here is to fool the hardware to execute something - * from scratch as it has not seen the batch move (due to missing - * the TLB invalidate). - */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } - - vm = i915_gem_context_get_vm_rcu(ctx); - if (i915_is_ggtt(vm)) - goto out_vm; - - /* Create two pages; dummy we prefill the TLB, and intended */ - bbe = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(bbe)) { - err = PTR_ERR(bbe); - goto out_vm; - } - - batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_put_bbe; - } - memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32)); - i915_gem_object_flush_map(bbe); - i915_gem_object_unpin_map(bbe); - - act = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(act)) { - err = PTR_ERR(act); - goto out_put_bbe; - } - - /* Track the execution of each request by writing into different slot */ - batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_put_act; - } - for (i = 0; i < count; i++) { - u32 *cs = batch + i * 64 / sizeof(*cs); - u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32); - - GEM_BUG_ON(GRAPHICS_VER(i915) < 6); - cs[0] = MI_STORE_DWORD_IMM_GEN4; - if (GRAPHICS_VER(i915) >= 8) { - cs[1] = lower_32_bits(addr); - cs[2] = upper_32_bits(addr); - cs[3] = i; - cs[4] = MI_NOOP; - cs[5] = MI_BATCH_BUFFER_START_GEN8; - } else { - cs[1] = 0; - cs[2] = lower_32_bits(addr); - cs[3] = i; - cs[4] = MI_NOOP; - cs[5] = MI_BATCH_BUFFER_START; - } - } - - out = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(out)) { - err = PTR_ERR(out); - goto out_put_batch; - } - i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED); - - vma = i915_vma_instance(out, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put_out; - } - - err = i915_vma_pin(vma, 0, 0, - PIN_USER | - PIN_OFFSET_FIXED | - (vm->total - PAGE_SIZE)); - if (err) - goto out_put_out; - GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE); - - result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB); - if (IS_ERR(result)) { - err = PTR_ERR(result); - goto out_put_out; - } - - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - IGT_TIMEOUT(end_time); - unsigned long pass = 0; - - if (!intel_engine_can_store_dword(ce->engine)) - continue; - - while (!__igt_timeout(end_time, NULL)) { - struct i915_vm_pt_stash stash = {}; - struct i915_request *rq; - struct i915_gem_ww_ctx ww; - u64 offset; - - offset = igt_random_offset(&prng, - 0, vm->total - PAGE_SIZE, - chunk_size, PAGE_SIZE); - - memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); - - vma = 
i915_vma_instance(bbe, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto end; - } - - err = vma->ops->set_pages(vma); - if (err) - goto end; - - i915_gem_ww_ctx_init(&ww, false); -retry: - err = i915_vm_lock_objects(vm, &ww); - if (err) - goto end_ww; - - err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size); - if (err) - goto end_ww; - - err = i915_vm_map_pt_stash(vm, &stash); - if (!err) - vm->allocate_va_range(vm, &stash, offset, chunk_size); - i915_vm_free_pt_stash(vm, &stash); -end_ww: - if (err == -EDEADLK) { - err = i915_gem_ww_ctx_backoff(&ww); - if (!err) - goto retry; - } - i915_gem_ww_ctx_fini(&ww); - if (err) - goto end; - - /* Prime the TLB with the dummy pages */ - for (i = 0; i < count; i++) { - vma->node.start = offset + i * PAGE_SIZE; - vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); - - rq = submit_batch(ce, vma->node.start); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto end; - } - i915_request_put(rq); - } - - vma->ops->clear_pages(vma); - - err = context_sync(ce); - if (err) { - pr_err("%s: dummy setup timed out\n", - ce->engine->name); - goto end; - } - - vma = i915_vma_instance(act, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto end; - } - - err = vma->ops->set_pages(vma); - if (err) - goto end; - - /* Replace the TLB with target batches */ - for (i = 0; i < count; i++) { - struct i915_request *rq; - u32 *cs = batch + i * 64 / sizeof(*cs); - u64 addr; - - vma->node.start = offset + i * PAGE_SIZE; - vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); - - addr = vma->node.start + i * 64; - cs[4] = MI_NOOP; - cs[6] = lower_32_bits(addr); - cs[7] = upper_32_bits(addr); - wmb(); - - rq = submit_batch(ce, addr); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto end; - } - - /* Wait until the context chain has started */ - if (i == 0) { - while (READ_ONCE(result[i]) && - !i915_request_completed(rq)) - cond_resched(); - } else { - end_spin(batch, i - 1); - } - - i915_request_put(rq); - } - end_spin(batch, count - 1); - - vma->ops->clear_pages(vma); - - err = context_sync(ce); - if (err) { - pr_err("%s: writes timed out\n", - ce->engine->name); - goto end; - } - - for (i = 0; i < count; i++) { - if (result[i] != i) { - pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n", - ce->engine->name, pass, - offset, i, result[i], i); - err = -EINVAL; - goto end; - } - } - - vm->clear_range(vm, offset, chunk_size); - pass++; - } - } -end: - if (igt_flush_test(i915)) - err = -EIO; - i915_gem_context_unlock_engines(ctx); - i915_gem_object_unpin_map(out); -out_put_out: - i915_gem_object_put(out); -out_put_batch: - i915_gem_object_unpin_map(act); -out_put_act: - i915_gem_object_put(act); -out_put_bbe: - i915_gem_object_put(bbe); -out_vm: - i915_vm_put(vm); -out_unlock: - fput(file); - return err; -} - int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -2101,16 +1967,17 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_fill), SUBTEST(igt_ppgtt_shrink), SUBTEST(igt_ppgtt_shrink_boom), + SUBTEST(igt_ppgtt_misaligned_pin), SUBTEST(igt_ggtt_lowlevel), SUBTEST(igt_ggtt_drunk), SUBTEST(igt_ggtt_walk), SUBTEST(igt_ggtt_pot), SUBTEST(igt_ggtt_fill), SUBTEST(igt_ggtt_page), - SUBTEST(igt_cs_tlb), + SUBTEST(igt_ggtt_misaligned_pin), }; - GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total)); + GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total)); - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } |
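A note for readers of the new misaligned_pin()/misaligned_case() tests added above: the offset arithmetic is easy to lose in diff form. The following is a minimal, stand-alone user-space sketch of that arithmetic only, not driver code; the 1 GiB hole and the 64 KiB minimum alignment are invented values standing in for what i915_vm_min_alignment() would report, and round_down() is a local reimplementation of the kernel macro for power-of-two alignments.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's round_down() macro, valid for
 * power-of-two alignments only (illustration). */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	/* Assumed example values. */
	const uint64_t hole_start = 0x100000000ull;            /* arbitrary   */
	const uint64_t hole_end = hole_start + (1ull << 30);   /* 1 GiB hole  */
	const uint64_t min_alignment = 64 * 1024;              /* assumed     */
	const uint64_t hole_size = hole_end - hole_start;

	/* misaligned_pin() skips holes too small for the test pattern. */
	if (hole_size < 3 * min_alignment)
		return 0;

	/* Aligned base address near the middle of the hole ... */
	uint64_t addr = round_down(hole_start + hole_size / 2, min_alignment);
	/* ... and the deliberately misaligned variant, which the test
	 * expects i915_vma_pin() to refuse with -EINVAL. */
	uint64_t misaligned = addr + min_alignment / 2;

	printf("aligned    %#llx\nmisaligned %#llx\n",
	       (unsigned long long)addr, (unsigned long long)misaligned);
	return 0;
}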
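Similarly, in the reworked lowlevel_hole() and drunk_hole() loops, size is a log2 exponent (the loop starts at 12, i.e. 4 KiB objects), so the new aligned_size = max_t(u32, ilog2(min_alignment), size) clamps that exponent rather than a byte count: the hole is then stepped through in BIT_ULL(aligned_size) units, never smaller than the VM's minimum alignment. A minimal model of that clamp, again assuming a 64 KiB minimum alignment:

#include <stdint.h>
#include <stdio.h>

/* Integer log2 of a power-of-two value, standing in for the kernel's
 * ilog2() (illustration only). */
static unsigned int ilog2_pow2(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const uint64_t hole_start = 0, hole_end = 1ull << 30; /* 1 GiB hole */
	const uint64_t min_alignment = 64 * 1024;             /* assumed    */

	/* Mirrors: for (size = 12; (hole_end - hole_start) >> size; size++) */
	for (unsigned int size = 12; (hole_end - hole_start) >> size; size++) {
		unsigned int aligned_size = size;

		if (aligned_size < ilog2_pow2(min_alignment))
			aligned_size = ilog2_pow2(min_alignment);

		uint64_t hole_slots = (hole_end - hole_start) >> aligned_size;

		printf("size=%2u aligned_size=%2u slots=%llu\n",
		       size, aligned_size, (unsigned long long)hole_slots);
	}
	return 0;
}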
