Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_ttm.c'):
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 415 ++++++++++++++----
 1 file changed, 340 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index de3fe79b665a..f65fe86c02b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -3,10 +3,16 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
+#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -16,10 +22,12 @@
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
+#include "gt/intel_gpu_commands.h"
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
+#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3
/*
* Size of struct ttm_place vector in on-stack struct ttm_placement allocs
@@ -59,8 +67,6 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
/**
@@ -121,41 +127,57 @@ i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
struct ttm_place *place,
+ resource_size_t offset,
+ resource_size_t size,
unsigned int flags)
{
memset(place, 0, sizeof(*place));
place->mem_type = intel_region_to_ttm_type(mr);
+ if (mr->type == INTEL_MEMORY_SYSTEM)
+ return;
+
if (flags & I915_BO_ALLOC_CONTIGUOUS)
- place->flags = TTM_PL_FLAG_CONTIGUOUS;
+ place->flags |= TTM_PL_FLAG_CONTIGUOUS;
+ if (offset != I915_BO_INVALID_OFFSET) {
+ WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn));
+ place->fpfn = offset >> PAGE_SHIFT;
+ WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
+ place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+ } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
+ if (flags & I915_BO_ALLOC_GPU_ONLY) {
+ place->flags |= TTM_PL_FLAG_TOPDOWN;
+ } else {
+ place->fpfn = 0;
+ WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
+ place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
+ }
+ }
}
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
- struct ttm_place *requested,
- struct ttm_place *busy,
+ struct ttm_place *places,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
unsigned int flags = obj->flags;
unsigned int i;
- placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, flags);
+ obj->mm.region, &places[0], obj->bo_offset,
+ obj->base.size, flags);
/* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
-
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ for (i = 0; i < num_allowed; ++i) {
+ i915_ttm_place_from_region(obj->mm.placements[i],
+ &places[i + 1], obj->bo_offset,
+ obj->base.size, flags);
+ places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
}
- placement->placement = requested;
- placement->busy_placement = busy;
+ placement->num_placement = num_allowed + 1;
+ placement->placement = places;
}
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
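A note on the new fpfn/lpfn handling above: both fields are page frame numbers, so a caller-supplied offset pins the buddy allocation to [offset, offset + size), while on small-BAR parts an allocation that is not GPU-only is clamped to the CPU-visible io window. Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages; the struct and helper are illustrative stand-ins rather than the driver's types, and the TTM_PL_FLAG_TOPDOWN hint for GPU-only objects is omitted.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define INVALID_OFFSET UINT64_MAX     /* illustrative stand-in for I915_BO_INVALID_OFFSET */

struct toy_place { uint32_t fpfn, lpfn; };   /* stand-in for the fpfn/lpfn pair in ttm_place */

static void toy_place_from_range(struct toy_place *p, uint64_t offset, uint64_t size,
                                 uint64_t io_size, uint64_t total, int gpu_only)
{
        p->fpfn = 0;
        p->lpfn = 0;                                 /* 0 means "no upper limit" */
        if (offset != INVALID_OFFSET) {              /* caller pinned the allocation */
                p->fpfn = offset >> PAGE_SHIFT;
                p->lpfn = p->fpfn + (size >> PAGE_SHIFT);
        } else if (io_size && io_size < total && !gpu_only) {
                p->lpfn = io_size >> PAGE_SHIFT;     /* keep it inside the CPU-visible window */
        }
}

int main(void)
{
        struct toy_place p;

        toy_place_from_range(&p, INVALID_OFFSET, 1ull << 20, 256ull << 20, 8ull << 30, 0);
        printf("fpfn=%u lpfn=%u\n", p.fpfn, p.lpfn);  /* fpfn=0 lpfn=65536 (256 MiB window) */
        return 0;
}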
@@ -165,7 +187,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- const unsigned int max_segment = i915_sg_segment_size();
+ const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
struct file *filp = i915_tt->filp;
struct sgt_iter sgt_iter;
@@ -245,22 +267,23 @@ static const struct i915_refct_sgt_ops tt_rsgt_ops = {
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ unsigned long ccs_pages = 0;
enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return NULL;
i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
if (!i915_tt)
return NULL;
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
- man->use_tt)
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+ ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
caching = i915_ttm_select_tt_caching(obj);
@@ -270,7 +293,12 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
+ if (i915_gem_object_needs_ccs_pages(obj))
+ ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
+ NUM_BYTES_PER_CCS_BYTE),
+ PAGE_SIZE);
+
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
if (ret)
goto err_free;
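The ccs_pages computation above reserves backing store for compression metadata: roughly one CCS byte per NUM_BYTES_PER_CCS_BYTE bytes of the object, rounded up to whole pages and handed to ttm_tt_init() as extra pages. A quick check of the arithmetic follows; the 256-byte ratio and 4 KiB page size are assumptions standing in for the macros used by the driver.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define NUM_BYTES_PER_CCS_BYTE  256UL    /* assumed main-memory-to-CCS ratio */
#define PAGE_SIZE               4096UL   /* assumed 4 KiB pages */

int main(void)
{
        unsigned long size = 64UL << 20;                               /* 64 MiB object */
        unsigned long ccs_bytes = DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE);
        unsigned long ccs_pages = DIV_ROUND_UP(ccs_bytes, PAGE_SIZE);

        /* 64 MiB / 256 = 256 KiB of CCS state -> 64 extra backing pages */
        printf("%lu CCS pages\n", ccs_pages);
        return 0;
}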
@@ -330,7 +358,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return false;
/*
@@ -342,7 +370,10 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
return false;
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
- return i915_gem_object_evictable(obj);
+ if (!i915_gem_object_evictable(obj))
+ return false;
+
+ return ttm_bo_eviction_valuable(bo, place);
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -424,21 +455,19 @@ int i915_ttm_purge(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
- bool no_wait_gpu,
- bool should_writeback)
+static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_ttm_tt *i915_tt =
container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = no_wait_gpu,
+ .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
};
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -467,7 +496,7 @@ static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
return ret;
}
- if (should_writeback)
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
return 0;
@@ -477,7 +506,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (likely(obj)) {
+ /*
+ * This gets called twice by ttm; so long as we have a ttm resource or
+ * ttm_tt then we can still safely call this. Due to pipeline-gutting,
+ * we may have a NULL bo->resource, but in that case we should always
+ * have a ttm alive (like if the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -496,7 +531,7 @@ static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
ret = sg_alloc_table_from_pages_segment(st,
ttm->pages, ttm->num_pages,
0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
- i915_sg_segment_size(), GFP_KERNEL);
+ i915_sg_segment_size(i915_tt->dev), GFP_KERNEL);
if (ret) {
st->sgl = NULL;
return ERR_PTR(ret);
@@ -530,10 +565,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct ttm_resource *res)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ u32 page_alignment;
if (!i915_ttm_gtt_binds_lmem(res))
return i915_ttm_tt_get_st(bo->ttm);
+ page_alignment = bo->page_alignment << PAGE_SHIFT;
+ if (!page_alignment)
+ page_alignment = obj->mm.region->min_page_size;
+
/*
* If CPU mapping differs, we need to add the ttm_tt pages to
* the resulting st. Might make sense for GGTT.
@@ -544,7 +584,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct i915_refct_sgt *rsgt;
rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
- res);
+ res,
+ page_alignment);
if (IS_ERR(rsgt))
return rsgt;
@@ -553,16 +594,24 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
}
- return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
+ return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
+ page_alignment);
}
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- int err;
+ long err;
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, 15 * HZ);
+ if (err < 0)
+ return err;
+ if (err == 0)
+ return -EBUSY;
+
err = i915_ttm_move_notify(bo);
if (err)
return err;
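dma_resv_wait_timeout() follows the usual wait convention of returning a negative error, zero when the timeout expires, or the remaining timeout on success, which is why the truncate path above folds its result into an errno before proceeding. A tiny sketch of that mapping (the helper name is made up):

#include <errno.h>
#include <stdio.h>

/* Illustrative: ret < 0 is an error, 0 means the timeout expired, > 0 is time left. */
static int wait_ret_to_errno(long ret)
{
        if (ret < 0)
                return (int)ret;        /* e.g. interrupted by a signal */
        if (ret == 0)
                return -EBUSY;          /* fences still busy after the timeout */
        return 0;                       /* wait completed in time */
}

int main(void)
{
        printf("%d %d %d\n", wait_ret_to_errno(-512),
               wait_ret_to_errno(0), wait_ret_to_errno(42));
        return 0;
}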
@@ -575,7 +624,7 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret;
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return;
ret = i915_ttm_move_notify(bo);
@@ -585,11 +634,48 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
i915_ttm_purge(obj);
}
+/**
+ * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
+ * accessible.
+ * @res: The TTM resource to check.
+ *
+ * This is interesting on small-BAR systems where we may encounter lmem objects
+ * that can't be accessed via the CPU.
+ */
+bool i915_ttm_resource_mappable(struct ttm_resource *res)
+{
+ struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+
+ if (!i915_ttm_cpu_maps_iomem(res))
+ return true;
+
+ return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
+}
+
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
+ bool unknown_state;
+
+ if (i915_ttm_is_ghost_object(mem->bo))
+ return -EINVAL;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return -EINVAL;
+
+ assert_object_held(obj);
+
+ unknown_state = i915_gem_object_has_unknown_state(obj);
+ i915_gem_object_put(obj);
+ if (unknown_state)
+ return -EINVAL;
+
if (!i915_ttm_cpu_maps_iomem(mem))
return 0;
+ if (!i915_ttm_resource_mappable(mem))
+ return -EINVAL;
+
mem->bus.caching = ttm_write_combined;
mem->bus.is_iomem = true;
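i915_ttm_resource_mappable() above treats a buddy resource as CPU accessible only if every block it occupies landed inside the visible portion of lmem, which is what the used_visible_size comparison encodes. A toy illustration of the same predicate; the struct is a stand-in rather than the driver's i915_ttm_buddy_resource, and 4 KiB pages are assumed.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL                        /* assumed */
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

struct toy_res {
        uint64_t size;                  /* resource size in bytes */
        uint64_t used_visible_size;     /* pages that landed below the BAR limit */
        bool     is_iomem;              /* lmem, i.e. CPU access goes through the BAR */
};

static bool toy_resource_mappable(const struct toy_res *res)
{
        if (!res->is_iomem)
                return true;            /* system memory is always CPU accessible */
        return res->used_visible_size == PFN_UP(res->size);
}

int main(void)
{
        struct toy_res partly = { 8 << 20, PFN_UP(4 << 20), true };
        struct toy_res fully  = { 8 << 20, PFN_UP(8 << 20), true };

        printf("%d %d\n", toy_resource_mappable(&partly), toy_resource_mappable(&fully)); /* 0 1 */
        return 0;
}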
@@ -604,15 +690,59 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long base;
unsigned int ofs;
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(bo));
GEM_WARN_ON(bo->ttm);
base = obj->mm.region->iomap.base - obj->mm.region->region.start;
- sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
+ sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
+static int i915_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset, void *buf,
+ int len, int write)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ resource_size_t iomap = obj->mm.region->iomap.base -
+ obj->mm.region->region.start;
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long bytes_left = len;
+
+ /*
+ * TODO: For now just let it fail if the resource is non-mappable;
+ * otherwise we would need to perform the memcpy from the gpu here,
+ * without interfering with the object (like moving the entire thing).
+ */
+ if (!i915_ttm_resource_mappable(bo->resource))
+ return -EIO;
+
+ offset -= page << PAGE_SHIFT;
+ do {
+ unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+ void __iomem *ptr;
+ dma_addr_t daddr;
+
+ daddr = i915_gem_object_get_dma_address(obj, page);
+ ptr = ioremap_wc(iomap + daddr + offset, bytes);
+ if (!ptr)
+ return -EIO;
+
+ if (write)
+ memcpy_toio(ptr, buf, bytes);
+ else
+ memcpy_fromio(buf, ptr, bytes);
+ iounmap(ptr);
+
+ page++;
+ buf += bytes;
+ bytes_left -= bytes;
+ offset = 0;
+ } while (bytes_left);
+
+ return len;
+}
+
/*
* All callbacks need to take care not to downcast a struct ttm_buffer_object
* without checking its subclass, since it might be a TTM ghost object.
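The copy loop in i915_ttm_access_memory() splits an arbitrary (offset, len) range into per-page chunks so each chunk can be ioremapped on its own: the first chunk is capped at PAGE_SIZE - offset, and offset is reset to zero for the pages that follow. The same chunking is sketched below against an ordinary in-memory buffer so it can be compiled and checked; the names and the plain memcpy are illustrative.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL        /* assumed */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Walk an (offset, len) range one page at a time, the way the driver does. */
static long chunked_read(const char *src, unsigned long offset,
                         char *buf, unsigned long len)
{
        unsigned long page = offset / PAGE_SIZE;
        unsigned long bytes_left = len;

        offset -= page * PAGE_SIZE;             /* offset within the first page */
        do {
                unsigned long bytes = MIN(bytes_left, PAGE_SIZE - offset);

                /* in the driver this is ioremap_wc() + memcpy_toio()/memcpy_fromio() */
                memcpy(buf, src + page * PAGE_SIZE + offset, bytes);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;                     /* later pages start at the beginning */
        } while (bytes_left);

        return len;
}

int main(void)
{
        static char src[3 * PAGE_SIZE], dst[6000];

        for (unsigned long i = 0; i < sizeof(src); i++)
                src[i] = (char)i;

        chunked_read(src, 4000, dst, 6000);     /* straddles two page boundaries */
        printf("%d\n", memcmp(dst, src + 4000, 6000) == 0);     /* prints 1 */
        return 0;
}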
@@ -629,6 +759,7 @@ static struct ttm_device_funcs i915_ttm_bo_driver = {
.delete_mem_notify = i915_ttm_delete_mem_notify,
.io_mem_reserve = i915_ttm_io_mem_reserve,
.io_mem_pfn = i915_ttm_io_mem_pfn,
+ .access_memory = i915_ttm_access_memory,
};
/**
@@ -649,12 +780,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- int real_num_busy;
+ struct ttm_placement initial_placement;
+ struct ttm_place initial_place;
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = fetch_and_zero(&placement->num_busy_placement);
- ret = ttm_bo_validate(bo, placement, &ctx);
+ initial_placement.num_placement = 1;
+ memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+ initial_place.flags |= TTM_PL_FLAG_DESIRED;
+ initial_placement.placement = &initial_place;
+ ret = ttm_bo_validate(bo, &initial_placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
/*
@@ -669,14 +804,13 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_busy_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
@@ -693,23 +827,27 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(obj->mm.rsgt);
obj->mm.rsgt = rsgt;
- __i915_gem_object_set_pages(obj, &rsgt->table,
- i915_sg_dma_sizes(rsgt->table.sgl));
+ __i915_gem_object_set_pages(obj, &rsgt->table);
}
+ GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
i915_ttm_adjust_lru(obj);
return ret;
}
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
- struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+ struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
struct ttm_placement placement;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
- i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+ i915_ttm_placement_from_obj(obj, places, &placement);
return __i915_ttm_get_pages(obj, &placement);
}
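The new -E2BIG check above exists because sg_alloc_table() counts pages in an unsigned int, so any object whose page count does not fit must be rejected up front. A small sketch of that bound, assuming 4 KiB pages and a 32-bit unsigned int:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

/* True if the page count for a byte size no longer fits sg_alloc_table's unsigned int. */
static int too_many_pages(uint64_t size)
{
        return (size >> PAGE_SHIFT) > UINT32_MAX;       /* assumes a 32-bit unsigned int */
}

int main(void)
{
        /* Anything past ~16 TiB of 4 KiB pages overflows a 32-bit page count. */
        printf("%d %d\n", too_many_pages(1ULL << 40), too_many_pages(1ULL << 45)); /* 0 1 */
        return 0;
}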
@@ -728,18 +866,18 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
* Gem forced migration using the i915_ttm_migrate() op, is allowed even
* to regions that are not in the object's list of allowable placements.
*/
-static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr)
+static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr,
+ unsigned int flags)
{
struct ttm_place requested;
struct ttm_placement placement;
int ret;
- i915_ttm_place_from_region(mr, &requested, obj->flags);
+ i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+ obj->base.size, flags);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
ret = __i915_ttm_get_pages(obj, &placement);
if (ret)
@@ -758,6 +896,13 @@ static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
return 0;
}
+static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mr,
+ unsigned int flags)
+{
+ return __i915_ttm_migrate(obj, mr, flags);
+}
+
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *st)
{
@@ -842,14 +987,28 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+
+ /*
+ * If we need to place an LMEM resource which doesn't need CPU
+ * access then we should try not to victimize mappable objects
+ * first, since we likely end up stealing more of the mappable
+ * portion. And likewise when we try to find space for a mappable
+ * object, we know not to ever victimize objects that don't
+ * occupy any mappable pages.
+ */
+ if (i915_ttm_cpu_maps_iomem(bo->resource) &&
+ i915_ttm_buddy_man_visible_size(man) < man->size &&
+ !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
+ bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
+ else
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
- ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ ttm_bo_move_to_lru_tail(bo);
spin_unlock(&bo->bdev->lru_lock);
}
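The priority selection above reads as a small decision table: purgeable objects go in the bucket that is scanned first for eviction, then objects without pages, while populated lmem objects that occupy CPU-visible space on a small-BAR part are pushed to the last bucket so they are victimized last. A condensed sketch of that ordering follows; the function and parameter names are illustrative, and the shmem/shrinkable branch that precedes this in the driver is not shown.

#include <stdbool.h>
#include <stdio.h>

/* Same ordering as the I915_TTM_PRIO_* defines: lower buckets are scanned first for eviction. */
enum toy_prio { PRIO_PURGE, PRIO_NO_PAGES, PRIO_HAS_PAGES, PRIO_NEEDS_CPU_ACCESS };

static enum toy_prio pick_prio(bool willneed, bool has_pages,
                               bool in_iomem, bool small_bar, bool gpu_only)
{
        if (!willneed)
                return PRIO_PURGE;              /* madv says the pages are expendable */
        if (!has_pages)
                return PRIO_NO_PAGES;
        if (in_iomem && small_bar && !gpu_only)
                return PRIO_NEEDS_CPU_ACCESS;   /* hang on to CPU-visible lmem the longest */
        return PRIO_HAS_PAGES;
}

int main(void)
{
        /* A populated, CPU-visible lmem object on a small-BAR part lands in bucket 3. */
        printf("%d\n", pick_prio(true, true, true, true, false));
        return 0;
}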
@@ -872,7 +1031,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -880,14 +1039,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
- obj = i915_ttm_to_gem(bo);
- if (!obj)
- return VM_FAULT_SIGBUS;
-
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
@@ -902,6 +1058,57 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ /*
+ * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+ * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+ * far as doing a ttm_bo_move_null(), which should skip all the
+ * other junk.
+ */
+ if (!bo->resource) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true, /* should be idle already */
+ };
+ int err;
+
+ GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+ } else if (!i915_ttm_resource_mappable(bo->resource)) {
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+ unsigned int flags;
+
+ if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
+ continue;
+
+ flags = obj->flags;
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ err = __i915_ttm_migrate(obj, mr, flags);
+ if (!err)
+ break;
+ }
+
+ if (err) {
+ drm_dbg_ratelimited(dev,
+ "Unable to make resource CPU accessible (err = %pe)\n",
+ ERR_PTR(err));
+ dma_resv_unlock(bo->base.resv);
+ ret = VM_FAULT_SIGBUS;
+ goto out_rpm;
+ }
+ }
+
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
@@ -909,12 +1116,35 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out_rpm;
+
+ /*
+ * ttm_bo_vm_reserve() already has dma_resv_lock.
+ * userfault_count is protected by dma_resv lock and rpm wakeref.
+ */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
+ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+
+ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
+ }
+
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
+ intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
return ret;
}
@@ -936,7 +1166,7 @@ static void ttm_vm_open(struct vm_area_struct *vma)
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
i915_gem_object_get(obj);
}
@@ -945,7 +1175,7 @@ static void ttm_vm_close(struct vm_area_struct *vma)
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
i915_gem_object_put(obj);
}
@@ -966,7 +1196,29 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ intel_wakeref_t wakeref = NULL;
+
+ assert_object_held_shared(obj);
+
+ if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
+ /* userfault_count is protected by obj lock and rpm wakeref. */
+ if (obj->userfault_count) {
+ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ list_del(&obj->userfault_link);
+ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ obj->userfault_count = 0;
+ }
+ }
+
+ GEM_WARN_ON(obj->userfault_count);
+
ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
}
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
@@ -977,7 +1229,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_truncate,
- .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+ .shrink = i915_ttm_shrink,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,
@@ -1019,7 +1271,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
}
}
-/**
+/*
* __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
* @mem: The initial memory region for the object.
* @obj: The gem object.
@@ -1030,6 +1282,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
*/
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -1046,6 +1299,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
/* Don't put on a region list until we're either locked or fully initialized. */
obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
@@ -1072,13 +1327,23 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
- ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement,
- page_size >> PAGE_SHIFT,
+ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+ &i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
+
+ /*
+ * XXX: ttm_bo_init_reserved() returns -ENOSPC if the size is too big to
+ * add a vma; the function that actually returns -ENOSPC is
+ * drm_mm_insert_node_in_range(). To report the same error as other code
+ * that returns -E2BIG when the size is too large, convert -ENOSPC to
+ * -E2BIG here.
+ */
+ if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC)
+ ret = -E2BIG;
+
if (ret)
return i915_ttm_err_to_gem(ret);
@@ -1105,7 +1370,7 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
mr = intel_memory_region_create(i915, 0,
totalram_pages() << PAGE_SHIFT,
- PAGE_SIZE, 0,
+ PAGE_SIZE, 0, 0,
type, instance,
&ttm_system_region_ops);
if (IS_ERR(mr))