Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_ttm.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 73
 1 file changed, 35 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 9227f8146a58..f65fe86c02b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,11 +5,13 @@
#include <linux/shmem_fs.h>
+#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/drm_buddy.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -65,8 +67,6 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
/**
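This hunk depends on TTM having dropped the separate busy list; struct ttm_placement is assumed to be down to a count plus a single array, with per-place flags taking over the busy-list role:

    struct ttm_placement {
            /* number of entries in the placement array */
            unsigned int            num_placement;
            /* preferred place first; fallbacks are flagged per entry */
            const struct ttm_place  *placement;
    };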
@@ -144,45 +144,40 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
place->fpfn = offset >> PAGE_SHIFT;
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
- } else if (mr->io_size && mr->io_size < mr->total) {
+ } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
- WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
- place->lpfn = mr->io_size >> PAGE_SHIFT;
+ WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
+ place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
- struct ttm_place *requested,
- struct ttm_place *busy,
+ struct ttm_place *places,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
unsigned int flags = obj->flags;
unsigned int i;
- placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, obj->bo_offset,
+ obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
/* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
- obj->bo_offset, obj->base.size, flags);
-
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ for (i = 0; i < num_allowed; ++i) {
+ i915_ttm_place_from_region(obj->mm.placements[i],
+ &places[i + 1], obj->bo_offset,
+ obj->base.size, flags);
+ places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
}
- placement->placement = requested;
- placement->busy_placement = busy;
+ placement->num_placement = num_allowed + 1;
+ placement->placement = places;
}
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
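i915_ttm_placement_from_obj() now emits one flat array: slot 0 holds the preferred place and slots 1..n repeat the allowed regions as fallbacks, which is also why i915_ttm_get_pages() below sizes its array as I915_TTM_MAX_PLACEMENTS + 1. A sketch of the resulting layout for a hypothetical object that prefers VRAM with a system-memory fallback (assuming TTM only tries TTM_PL_FLAG_FALLBACK entries once the preferred place cannot be satisfied):

    #include <drm/ttm/ttm_placement.h>

    static const struct ttm_place vram_then_smem[] = {
            { .mem_type = TTM_PL_VRAM },    /* preferred */
            { .mem_type = TTM_PL_SYSTEM, .flags = TTM_PL_FLAG_FALLBACK },
    };
    static struct ttm_placement example_placement = {
            .num_placement = 2,
            .placement = vram_then_smem,
    };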
@@ -785,12 +780,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- int real_num_busy;
+ struct ttm_placement initial_placement;
+ struct ttm_place initial_place;
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = fetch_and_zero(&placement->num_busy_placement);
- ret = ttm_bo_validate(bo, placement, &ctx);
+ initial_placement.num_placement = 1;
+ memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+ initial_place.flags |= TTM_PL_FLAG_DESIRED;
+ initial_placement.placement = &initial_place;
+ ret = ttm_bo_validate(bo, &initial_placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
/*
@@ -805,14 +804,13 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_busy_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
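With num_busy_placement gone, the "first try without eviction" trick can no longer be done by fetch_and_zero()ing the busy count; it is expressed through flags instead. Pass one validates only the preferred place, marked TTM_PL_FLAG_DESIRED so TTM will not evict anything to satisfy it; on failure, pass two retries with the full list, where the fallback entries come into play. A condensed sketch of the pattern, using a hypothetical helper name:

    #include <drm/ttm/ttm_bo.h>
    #include <drm/ttm/ttm_placement.h>

    static int validate_desired_first(struct ttm_buffer_object *bo,
                                      struct ttm_placement *full,
                                      struct ttm_operation_ctx *ctx)
    {
            struct ttm_place first = full->placement[0];
            struct ttm_placement pass1 = {
                    .num_placement = 1,
                    .placement = &first,
            };
            int ret;

            /* DESIRED: do not evict other buffers to make this fit */
            first.flags |= TTM_PL_FLAG_DESIRED;
            ret = ttm_bo_validate(bo, &pass1, ctx);
            if (ret)        /* retry; eviction into fallbacks allowed */
                    ret = ttm_bo_validate(bo, full, ctx);
            return ret;
    }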
@@ -839,7 +837,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
- struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+ struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
struct ttm_placement placement;
/* restricted by sg_alloc_table */
@@ -849,7 +847,7 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
- i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+ i915_ttm_placement_from_obj(obj, places, &placement);
return __i915_ttm_get_pages(obj, &placement);
}
@@ -879,9 +877,7 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
obj->base.size, flags);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
ret = __i915_ttm_get_pages(obj, &placement);
if (ret)
@@ -1000,7 +996,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
- * portion. And likewise when we try to find space for a mappble
+ * portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
@@ -1035,7 +1031,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -1044,7 +1040,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
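The 0 -> NULL changes here and in i915_ttm_unmap_virtual() below assume intel_wakeref_t is no longer a plain integer; in recent trees it is a pointer-typed cookie, roughly:

    /* Assumed definition from intel_wakeref.h in recent kernels. */
    typedef struct ref_tracker *intel_wakeref_t;

That also explains the rewritten autosuspend check further down: a held wakeref is now a plain NULL test, combined with an explicit != 0 test of the Kconfig timeout instead of the old bitwise AND.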
@@ -1090,7 +1086,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
- if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
+ if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
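Here and in i915_ttm_place_from_region() above, mr->io_size is assumed to have been folded into a struct resource named io, queried via resource_size() from <linux/ioport.h>. A hypothetical helper mirroring the two checks this patch keeps making:

    #include <linux/ioport.h>

    #include "intel_memory_region.h"

    static bool mr_has_small_bar(const struct intel_memory_region *mr)
    {
            /* resource_size() is end - start + 1; zero means no
             * CPU-visible window at all.
             */
            resource_size_t io = resource_size(&mr->io);

            return io && io < mr->total;
    }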
@@ -1101,8 +1097,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
if (err) {
- drm_dbg(dev, "Unable to make resource CPU accessible(err = %pe)\n",
- ERR_PTR(err));
+ drm_dbg_ratelimited(dev,
+ "Unable to make resource CPU accessible(err = %pe)\n",
+ ERR_PTR(err));
dma_resv_unlock(bo->base.resv);
ret = VM_FAULT_SIGBUS;
goto out_rpm;
@@ -1136,7 +1133,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
}
- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -1200,7 +1197,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
assert_object_held_shared(obj);
@@ -1330,7 +1327,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,