author	Jani Nikula <jani.nikula@intel.com>	2022-04-11 16:01:56 +0300
committer	Jani Nikula <jani.nikula@intel.com>	2022-04-11 16:01:56 +0300
commit	83970cd63b9f864525761137b500113ab0b49c94 (patch)
tree	747b97113d60666cbb44f8a5633b961b7eda3c86 /drivers/gpu/drm/i915/selftests
parent	618f5df1f6a5a3f29fad824116da291a7d14ab5e (diff)
parent	3123109284176b1532874591f7c81f3837bbdc17 (diff)
Merge drm/drm-next into drm-intel-next
Sync up with v5.18-rc1, in particular to get 5e3094cfd9fb ("drm/i915/xehpsdv: Add has_flat_ccs to device info").

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_gem_gtt.c	222
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_memory_region.c	159
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_region.c	13
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_region.h	3
4 files changed, 350 insertions, 47 deletions
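
The recurring pattern in the i915_gem_gtt.c hunks below is replacing hard-coded 4K/PAGE_SIZE assumptions with the per-region minimum GTT alignment, i.e. rounding object sizes and hole strides up to min_alignment and clamping the log2 step to at least ilog2(min_alignment). As an aside, the arithmetic can be sketched in standalone userspace C; the 64K minimum alignment and the helpers below are illustrative stand-ins (in the driver the value comes from i915_vm_min_alignment() and the helpers are kernel macros), not the kernel code itself.

/*
 * Standalone sketch of the alignment arithmetic used in lowlevel_hole(),
 * fill_hole() and walk_hole() after this change. Values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);	/* align must be a power of two */
}

static unsigned int ilog2_u64(uint64_t x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const uint64_t min_alignment = 1ull << 16;	/* assume 64K, e.g. a DG2-style region */
	const uint64_t hole_start = 0, hole_end = 1ull << 30;
	unsigned int size = 12;				/* log2 of the object size, as in lowlevel_hole() */

	/* lowlevel_hole()/drunk_hole(): never step by less than the minimum alignment */
	unsigned int aligned_size = ilog2_u64(min_alignment) > size ?
				    ilog2_u64(min_alignment) : size;
	uint64_t hole_size = (hole_end - hole_start) >> aligned_size;

	/* fill_hole()/walk_hole(): advance offsets by the aligned object size */
	uint64_t obj_size = 4096;
	uint64_t stride = round_up_u64(obj_size, min_alignment);

	printf("aligned_size=%u slots=%llu stride=%llu\n",
	       aligned_size, (unsigned long long)hole_size,
	       (unsigned long long)stride);
	return 0;
}
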
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index e7e6c4b2c81d..ab751192eb3b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -27,9 +27,11 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gtt.h"
#include "i915_random.h"
#include "i915_selftest.h"
@@ -239,6 +241,8 @@ static int lowlevel_hole(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
unsigned long end_time)
{
+ const unsigned int min_alignment =
+ i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
I915_RND_STATE(seed_prng);
struct i915_vma_resource *mock_vma_res;
unsigned int size;
@@ -252,9 +256,10 @@ static int lowlevel_hole(struct i915_address_space *vm,
I915_RND_SUBSTATE(prng, seed_prng);
struct drm_i915_gem_object *obj;
unsigned int *order, count, n;
- u64 hole_size;
+ u64 hole_size, aligned_size;
- hole_size = (hole_end - hole_start) >> size;
+ aligned_size = max_t(u32, ilog2(min_alignment), size);
+ hole_size = (hole_end - hole_start) >> aligned_size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
count = hole_size >> 1;
@@ -275,8 +280,8 @@ static int lowlevel_hole(struct i915_address_space *vm,
}
GEM_BUG_ON(!order);
- GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
- GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
+ GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
+ GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
/* Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
@@ -299,10 +304,10 @@ static int lowlevel_hole(struct i915_address_space *vm,
}
for (n = 0; n < count; n++) {
- u64 addr = hole_start + order[n] * BIT_ULL(size);
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
intel_wakeref_t wakeref;
- GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+ GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
if (igt_timeout(end_time,
"%s timed out before %d/%d\n",
@@ -345,7 +350,7 @@ alloc_vm_end:
}
mock_vma_res->bi.pages = obj->mm.pages;
- mock_vma_res->node_size = BIT_ULL(size);
+ mock_vma_res->node_size = BIT_ULL(aligned_size);
mock_vma_res->start = addr;
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
@@ -356,7 +361,7 @@ alloc_vm_end:
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
- u64 addr = hole_start + order[n] * BIT_ULL(size);
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
@@ -400,8 +405,10 @@ static int fill_hole(struct i915_address_space *vm,
{
const u64 hole_size = hole_end - hole_start;
struct drm_i915_gem_object *obj;
+ const unsigned int min_alignment =
+ i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
const unsigned long max_pages =
- min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
+ min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
unsigned long npages, prime, flags;
struct i915_vma *vma;
@@ -442,14 +449,17 @@ static int fill_hole(struct i915_address_space *vm,
offset = p->offset;
list_for_each_entry(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
continue;
if (p->step < 0) {
- if (offset < hole_start + obj->base.size)
+ if (offset < hole_start + aligned_size)
break;
- offset -= obj->base.size;
+ offset -= aligned_size;
}
err = i915_vma_pin(vma, 0, 0, offset | flags);
@@ -471,22 +481,25 @@ static int fill_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
if (p->step > 0) {
- if (offset + obj->base.size > hole_end)
+ if (offset + aligned_size > hole_end)
break;
- offset += obj->base.size;
+ offset += aligned_size;
}
}
offset = p->offset;
list_for_each_entry(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
continue;
if (p->step < 0) {
- if (offset < hole_start + obj->base.size)
+ if (offset < hole_start + aligned_size)
break;
- offset -= obj->base.size;
+ offset -= aligned_size;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -507,22 +520,25 @@ static int fill_hole(struct i915_address_space *vm,
}
if (p->step > 0) {
- if (offset + obj->base.size > hole_end)
+ if (offset + aligned_size > hole_end)
break;
- offset += obj->base.size;
+ offset += aligned_size;
}
}
offset = p->offset;
list_for_each_entry_reverse(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
continue;
if (p->step < 0) {
- if (offset < hole_start + obj->base.size)
+ if (offset < hole_start + aligned_size)
break;
- offset -= obj->base.size;
+ offset -= aligned_size;
}
err = i915_vma_pin(vma, 0, 0, offset | flags);
@@ -544,22 +560,25 @@ static int fill_hole(struct i915_address_space *vm,
i915_vma_unpin(vma);
if (p->step > 0) {
- if (offset + obj->base.size > hole_end)
+ if (offset + aligned_size > hole_end)
break;
- offset += obj->base.size;
+ offset += aligned_size;
}
}
offset = p->offset;
list_for_each_entry_reverse(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
continue;
if (p->step < 0) {
- if (offset < hole_start + obj->base.size)
+ if (offset < hole_start + aligned_size)
break;
- offset -= obj->base.size;
+ offset -= aligned_size;
}
if (!drm_mm_node_allocated(&vma->node) ||
@@ -580,9 +599,9 @@ static int fill_hole(struct i915_address_space *vm,
}
if (p->step > 0) {
- if (offset + obj->base.size > hole_end)
+ if (offset + aligned_size > hole_end)
break;
- offset += obj->base.size;
+ offset += aligned_size;
}
}
}
@@ -612,6 +631,7 @@ static int walk_hole(struct i915_address_space *vm,
const u64 hole_size = hole_end - hole_start;
const unsigned long max_pages =
min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+ unsigned long min_alignment;
unsigned long flags;
u64 size;
@@ -621,6 +641,8 @@ static int walk_hole(struct i915_address_space *vm,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
for_each_prime_number_from(size, 1, max_pages) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -639,7 +661,7 @@ static int walk_hole(struct i915_address_space *vm,
for (addr = hole_start;
addr + obj->base.size < hole_end;
- addr += obj->base.size) {
+ addr += round_up(obj->base.size, min_alignment)) {
err = i915_vma_pin(vma, 0, 0, addr | flags);
if (err) {
pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
@@ -691,6 +713,7 @@ static int pot_hole(struct i915_address_space *vm,
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ unsigned int min_alignment;
unsigned long flags;
unsigned int pot;
int err = 0;
@@ -699,6 +722,8 @@ static int pot_hole(struct i915_address_space *vm,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -711,13 +736,13 @@ static int pot_hole(struct i915_address_space *vm,
/* Insert a pair of pages across every pot boundary within the hole */
for (pot = fls64(hole_end - 1) - 1;
- pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+ pot > ilog2(2 * min_alignment);
pot--) {
u64 step = BIT_ULL(pot);
u64 addr;
- for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
- addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
+ addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment;
addr += step) {
err = i915_vma_pin(vma, 0, 0, addr | flags);
if (err) {
@@ -762,6 +787,7 @@ static int drunk_hole(struct i915_address_space *vm,
unsigned long end_time)
{
I915_RND_STATE(prng);
+ unsigned int min_alignment;
unsigned int size;
unsigned long flags;
@@ -769,15 +795,18 @@ static int drunk_hole(struct i915_address_space *vm,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
struct drm_i915_gem_object *obj;
unsigned int *order, count, n;
struct i915_vma *vma;
- u64 hole_size;
+ u64 hole_size, aligned_size;
int err = -ENODEV;
- hole_size = (hole_end - hole_start) >> size;
+ aligned_size = max_t(u32, ilog2(min_alignment), size);
+ hole_size = (hole_end - hole_start) >> aligned_size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
count = hole_size >> 1;
@@ -817,7 +846,7 @@ static int drunk_hole(struct i915_address_space *vm,
GEM_BUG_ON(vma->size != BIT_ULL(size));
for (n = 0; n < count; n++) {
- u64 addr = hole_start + order[n] * BIT_ULL(size);
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
err = i915_vma_pin(vma, 0, 0, addr | flags);
if (err) {
@@ -869,11 +898,14 @@ static int __shrink_hole(struct i915_address_space *vm,
{
struct drm_i915_gem_object *obj;
unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ unsigned int min_alignment;
unsigned int order = 12;
LIST_HEAD(objects);
int err = 0;
u64 addr;
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
/* Keep creating larger objects until one cannot fit into the hole */
for (addr = hole_start; addr < hole_end; ) {
struct i915_vma *vma;
@@ -914,7 +946,7 @@ static int __shrink_hole(struct i915_address_space *vm,
}
i915_vma_unpin(vma);
- addr += size;
+ addr += round_up(size, min_alignment);
/*
* Since we are injecting allocation faults at random intervals,
@@ -1038,6 +1070,118 @@ err_purge:
return err;
}
+static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
+ u64 addr, u64 size, unsigned long flags)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err = 0;
+ u64 expected_vma_size, expected_node_size;
+ bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
+ mr->type == INTEL_MEMORY_STOLEN_LOCAL;
+
+ obj = i915_gem_object_create_region(mr, size, 0, 0);
+ if (IS_ERR(obj)) {
+ /* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
+ if (PTR_ERR(obj) == -ENODEV && is_stolen)
+ return 0;
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err)
+ goto err_put;
+ i915_vma_unpin(vma);
+
+ if (!drm_mm_node_allocated(&vma->node)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
+ expected_node_size = expected_vma_size;
+
+ if (NEEDS_COMPACT_PT(vm->i915) && i915_gem_object_is_lmem(obj)) {
+ /* compact-pt should expand lmem node to 2MB */
+ expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ }
+
+ if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
+ err = i915_vma_unbind_unlocked(vma);
+ err = -EBADSLT;
+ goto err_put;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err)
+ goto err_put;
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+err_put:
+ i915_gem_object_put(obj);
+ cleanup_freed_objects(vm->i915);
+ return err;
+}
+
+static int misaligned_pin(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+ unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ int err = 0;
+ u64 hole_size = hole_end - hole_start;
+
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_memory_region(mr, vm->i915, id) {
+ u64 min_alignment = i915_vm_min_alignment(vm, (enum intel_memory_type)id);
+ u64 size = min_alignment;
+ u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
+
+ /* avoid -ENOSPC on very small hole setups */
+ if (hole_size < 3 * min_alignment)
+ continue;
+
+ /* we can't test < 4k alignment due to flags being encoded in lower bits */
+ if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
+ err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
+ /* misaligned should error with -EINVAL*/
+ if (!err)
+ err = -EBADSLT;
+ if (err != -EINVAL)
+ return err;
+ }
+
+ /* test for vma->size expansion to min page size */
+ err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
+ if (err)
+ return err;
+
+ /* test for intermediate size not expanding vma->size for large alignments */
+ err = misaligned_case(vm, mr, addr, size / 2, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
int (*func)(struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
@@ -1107,6 +1251,11 @@ static int igt_ppgtt_shrink_boom(void *arg)
return exercise_ppgtt(arg, shrink_boom);
}
+static int igt_ppgtt_misaligned_pin(void *arg)
+{
+ return exercise_ppgtt(arg, misaligned_pin);
+}
+
static int sort_holes(void *priv, const struct list_head *A,
const struct list_head *B)
{
@@ -1179,6 +1328,11 @@ static int igt_ggtt_lowlevel(void *arg)
return exercise_ggtt(arg, lowlevel_hole);
}
+static int igt_ggtt_misaligned_pin(void *arg)
+{
+ return exercise_ggtt(arg, misaligned_pin);
+}
+
static int igt_ggtt_page(void *arg)
{
const unsigned int count = PAGE_SIZE/sizeof(u32);
@@ -2151,12 +2305,14 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_fill),
SUBTEST(igt_ppgtt_shrink),
SUBTEST(igt_ppgtt_shrink_boom),
+ SUBTEST(igt_ppgtt_misaligned_pin),
SUBTEST(igt_ggtt_lowlevel),
SUBTEST(igt_ggtt_drunk),
SUBTEST(igt_ggtt_walk),
SUBTEST(igt_ggtt_pot),
SUBTEST(igt_ggtt_fill),
SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_ggtt_misaligned_pin),
SUBTEST(igt_cs_tlb),
};
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 7acba1d2135e..ba32893e0873 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -17,6 +17,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
@@ -170,7 +171,7 @@ static int igt_mock_reserve(void *arg)
if (!order)
return 0;
- mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
if (IS_ERR(mem)) {
pr_err("failed to create memory region\n");
err = PTR_ERR(mem);
@@ -383,7 +384,7 @@ static int igt_mock_splintered_region(void *arg)
*/
size = (SZ_4G - 1) & PAGE_MASK;
- mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
@@ -471,7 +472,7 @@ static int igt_mock_max_segment(void *arg)
*/
size = SZ_8G;
- mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
@@ -512,6 +513,147 @@ out_put:
return err;
}
+static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mr = obj->mm.region;
+ struct i915_ttm_buddy_resource *bman_res =
+ to_ttm_buddy_resource(obj->mm.res);
+ struct drm_buddy *mm = bman_res->mm;
+ struct drm_buddy_block *block;
+ u64 total;
+
+ total = 0;
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ u64 start = drm_buddy_block_offset(block);
+ u64 end = start + drm_buddy_block_size(mm, block);
+
+ if (start < mr->io_size)
+ total += min_t(u64, end, mr->io_size) - start;
+ }
+
+ return total;
+}
+
+static int igt_mock_io_size(void *arg)
+{
+ struct intel_memory_region *mr = arg;
+ struct drm_i915_private *i915 = mr->i915;
+ struct drm_i915_gem_object *obj;
+ u64 mappable_theft_total;
+ u64 io_size;
+ u64 total;
+ u64 ps;
+ u64 rem;
+ u64 size;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ int err = 0;
+
+ ps = SZ_4K;
+ if (i915_prandom_u64_state(&prng) & 1)
+ ps = SZ_64K; /* For something like DG2 */
+
+ div64_u64_rem(i915_prandom_u64_state(&prng), SZ_8G, &total);
+ total = round_down(total, ps);
+ total = max_t(u64, total, SZ_1G);
+
+ div64_u64_rem(i915_prandom_u64_state(&prng), total - ps, &io_size);
+ io_size = round_down(io_size, ps);
+ io_size = max_t(u64, io_size, SZ_256M); /* 256M seems to be the common lower limit */
+
+ pr_info("%s with ps=%llx, io_size=%llx, total=%llx\n",
+ __func__, ps, io_size, total);
+
+ mr = mock_region_create(i915, 0, total, ps, 0, io_size);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto out_err;
+ }
+
+ mappable_theft_total = 0;
+ rem = total - io_size;
+ do {
+ div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
+ size = round_down(size, ps);
+ size = max(size, ps);
+
+ obj = igt_object_create(mr, &objects, size,
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj)) {
+ pr_err("%s TOPDOWN failed with rem=%llx, size=%llx\n",
+ __func__, rem, size);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ mappable_theft_total += igt_object_mappable_total(obj);
+ rem -= size;
+ } while (rem);
+
+ pr_info("%s mappable theft=(%lluMiB/%lluMiB), total=%lluMiB\n",
+ __func__,
+ (u64)mappable_theft_total >> 20,
+ (u64)io_size >> 20,
+ (u64)total >> 20);
+
+ /*
+ * Even if we allocate all of the non-mappable portion, we should still
+ * be able to dip into the mappable portion.
+ */
+ obj = igt_object_create(mr, &objects, io_size,
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj)) {
+ pr_err("%s allocation unexpectedly failed\n", __func__);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ close_objects(mr, &objects);
+
+ rem = io_size;
+ do {
+ div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
+ size = round_down(size, ps);
+ size = max(size, ps);
+
+ obj = igt_object_create(mr, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ pr_err("%s MAPPABLE failed with rem=%llx, size=%llx\n",
+ __func__, rem, size);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ if (igt_object_mappable_total(obj) != size) {
+ pr_err("%s allocation is not mappable(size=%llx)\n",
+ __func__, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+ rem -= size;
+ } while (rem);
+
+ /*
+ * We assume CPU access is required by default, which should result in a
+ * failure here, even though the non-mappable portion is free.
+ */
+ obj = igt_object_create(mr, &objects, ps, 0);
+ if (!IS_ERR(obj)) {
+ pr_err("%s allocation unexpectedly succeeded\n", __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+out_close:
+ close_objects(mr, &objects);
+ intel_memory_region_destroy(mr);
+out_err:
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -680,8 +822,14 @@ static int igt_lmem_create_with_ps(void *arg)
i915_gem_object_lock(obj, NULL);
err = i915_gem_object_pin_pages(obj);
- if (err)
+ if (err) {
+ if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
+ pr_info("%s not enough lmem for ps(%u) err=%d\n",
+ __func__, ps, err);
+ err = 0;
+ }
goto out_put;
+ }
daddr = i915_gem_object_get_dma_address(obj, 0);
if (!IS_ALIGNED(daddr, ps)) {
@@ -1179,6 +1327,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
SUBTEST(igt_mock_max_segment),
+ SUBTEST(igt_mock_io_size),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
@@ -1188,7 +1337,7 @@ int intel_memory_region_mock_selftests(void)
if (!i915)
return -ENOMEM;
- mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
if (IS_ERR(mem)) {
pr_err("failed to create memory region\n");
err = PTR_ERR(mem);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 19bff8afcaaa..f64325491f35 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -22,17 +22,12 @@ static void mock_region_put_pages(struct drm_i915_gem_object *obj,
static int mock_region_get_pages(struct drm_i915_gem_object *obj)
{
- unsigned int flags;
struct sg_table *pages;
int err;
- flags = 0;
- if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
- flags |= TTM_PL_FLAG_CONTIGUOUS;
-
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
obj->base.size,
- flags);
+ obj->flags);
if (IS_ERR(obj->mm.res))
return PTR_ERR(obj->mm.res);
@@ -107,7 +102,8 @@ mock_region_create(struct drm_i915_private *i915,
resource_size_t start,
resource_size_t size,
resource_size_t min_page_size,
- resource_size_t io_start)
+ resource_size_t io_start,
+ resource_size_t io_size)
{
int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
@@ -117,6 +113,7 @@ mock_region_create(struct drm_i915_private *i915,
return ERR_PTR(instance);
return intel_memory_region_create(i915, start, size, min_page_size,
- io_start, INTEL_MEMORY_MOCK, instance,
+ io_start, io_size,
+ INTEL_MEMORY_MOCK, instance,
&mock_region_ops);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
index 329bf74dfaca..e36c3a433551 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.h
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -16,6 +16,7 @@ mock_region_create(struct drm_i915_private *i915,
resource_size_t start,
resource_size_t size,
resource_size_t min_page_size,
- resource_size_t io_start);
+ resource_size_t io_start,
+ resource_size_t io_size);
#endif /* !__MOCK_REGION_H */
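
The mock_region_create() signature gains an io_size argument so the mock region can model a partially CPU-visible (small-BAR style) region, which is what the new igt_mock_io_size test exercises: only the part of each allocated block that lies below io_size counts as mappable. A standalone C sketch of that accounting, mirroring igt_object_mappable_total(); the block layout and io_size here are made up for illustration and are not driver data structures.

/* Sketch of the mappable-vs-GPU-only accounting from igt_mock_io_size. */
#include <stdint.h>
#include <stdio.h>

struct block {
	uint64_t start;
	uint64_t size;
};

static uint64_t mappable_total(const struct block *blocks, unsigned int nr,
			       uint64_t io_size)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		uint64_t start = blocks[i].start;
		uint64_t end = start + blocks[i].size;

		/* only the portion below io_size is CPU-visible */
		if (start < io_size)
			total += (end < io_size ? end : io_size) - start;
	}
	return total;
}

int main(void)
{
	/* hypothetical region: 1G total, first 256M CPU-visible */
	const uint64_t io_size = 256ull << 20;
	const struct block blocks[] = {
		{ .start = 0,            .size = 128ull << 20 },	/* fully mappable */
		{ .start = 192ull << 20, .size = 128ull << 20 },	/* straddles io_size */
		{ .start = 512ull << 20, .size = 256ull << 20 },	/* GPU-only */
	};

	printf("mappable = %llu MiB\n",
	       (unsigned long long)(mappable_total(blocks, 3, io_size) >> 20));
	return 0;
}
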