Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_ttm.c')
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 259 ++++++++++++++------
 1 file changed, 182 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index a4aa9500fa17..f65fe86c02b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,11 +5,13 @@
#include <linux/shmem_fs.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -65,8 +67,6 @@ static const struct ttm_place sys_placement_flags = {
static struct ttm_placement i915_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags,
};
/**
@@ -140,46 +140,44 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
if (offset != I915_BO_INVALID_OFFSET) {
+ WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn));
place->fpfn = offset >> PAGE_SHIFT;
+ WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
- } else if (mr->io_size && mr->io_size < mr->total) {
+ } else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
- place->lpfn = mr->io_size >> PAGE_SHIFT;
+ WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
+ place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
}
}
}
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
- struct ttm_place *requested,
- struct ttm_place *busy,
+ struct ttm_place *places,
struct ttm_placement *placement)
{
unsigned int num_allowed = obj->mm.n_placements;
unsigned int flags = obj->flags;
unsigned int i;
- placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, obj->bo_offset,
+ obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
/* Cache this on object? */
- placement->num_busy_placement = num_allowed;
- for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
- obj->bo_offset, obj->base.size, flags);
-
- if (num_allowed == 0) {
- *busy = *requested;
- placement->num_busy_placement = 1;
+ for (i = 0; i < num_allowed; ++i) {
+ i915_ttm_place_from_region(obj->mm.placements[i],
+ &places[i + 1], obj->bo_offset,
+ obj->base.size, flags);
+ places[i + 1].flags |= TTM_PL_FLAG_FALLBACK;
}
- placement->placement = requested;
- placement->busy_placement = busy;
+ placement->num_placement = num_allowed + 1;
+ placement->placement = places;
}
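
The old requested/busy split is now a single array: entry 0 is the primary
placement and every additional entry is tagged TTM_PL_FLAG_FALLBACK. A minimal
sketch (hypothetical helper, assuming <drm/ttm/ttm_placement.h>; it mirrors,
but is not copied from, TTM's two-pass ttm_bo_mem_space() logic) of how such a
unified list is consumed:

static const struct ttm_place *
pick_place_sketch(const struct ttm_placement *pl, bool evicting)
{
	unsigned int i;

	for (i = 0; i < pl->num_placement; ++i) {
		const struct ttm_place *p = &pl->placement[i];

		/* FALLBACK entries are only tried once eviction is allowed */
		if (!evicting && (p->flags & TTM_PL_FLAG_FALLBACK))
			continue;
		/* DESIRED entries are never satisfied through eviction */
		if (evicting && (p->flags & TTM_PL_FLAG_DESIRED))
			continue;
		return p;	/* first acceptable candidate */
	}
	return NULL;
}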
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
@@ -271,23 +269,21 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
bdev);
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
unsigned long ccs_pages = 0;
enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return NULL;
i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
if (!i915_tt)
return NULL;
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
- man->use_tt)
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+ ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
caching = i915_ttm_select_tt_caching(obj);
@@ -362,7 +358,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return false;
/*
@@ -471,7 +467,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -509,18 +505,15 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- intel_wakeref_t wakeref = 0;
-
- if (bo->resource && likely(obj)) {
- /* ttm_bo_release() already has dma_resv_lock */
- if (i915_ttm_cpu_maps_iomem(bo->resource))
- wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+ /*
+ * This gets called twice by ttm; so long as we have a ttm resource or
+ * a ttm_tt we can still safely call this. Due to pipeline-gutting, we
+ * may have a NULL bo->resource, but in that case we should always have
+ * a live ttm_tt (e.g. if the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
-
- if (wakeref)
- intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
-
i915_ttm_free_cached_io_rsgt(obj);
}
}
@@ -608,10 +601,17 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- int err;
+ long err;
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, 15 * HZ);
+ if (err < 0)
+ return err;
+ if (err == 0)
+ return -EBUSY;
+
err = i915_ttm_move_notify(bo);
if (err)
return err;
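
The switch from int to long err is driven by dma_resv_wait_timeout(): it
returns the remaining jiffies on success, 0 on timeout and a negative errno,
so all three cases must be distinguished. A hedged helper sketch (hypothetical,
not part of the patch) folding that convention into a plain errno:

static int i915_ttm_wait_idle_sketch(struct ttm_buffer_object *bo)
{
	long ret = dma_resv_wait_timeout(bo->base.resv,
					 DMA_RESV_USAGE_BOOKKEEP,
					 true /* interruptible */, 15 * HZ);

	if (ret < 0)			/* e.g. -ERESTARTSYS on a signal */
		return ret;
	return ret == 0 ? -EBUSY : 0;	/* 0 jiffies left == timed out */
}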
@@ -624,7 +624,7 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
int ret;
- if (!obj)
+ if (i915_ttm_is_ghost_object(bo))
return;
ret = i915_ttm_move_notify(bo);
@@ -649,7 +649,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res)
if (!i915_ttm_cpu_maps_iomem(res))
return true;
- return bman_res->used_visible_size == bman_res->base.num_pages;
+ return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
}
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
@@ -657,7 +657,7 @@ static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource
struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
bool unknown_state;
- if (!obj)
+ if (i915_ttm_is_ghost_object(mem->bo))
return -EINVAL;
if (!kref_get_unless_zero(&obj->base.refcount))
@@ -690,15 +690,59 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long base;
unsigned int ofs;
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(bo));
GEM_WARN_ON(bo->ttm);
base = obj->mm.region->iomap.base - obj->mm.region->region.start;
- sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
+ sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
+static int i915_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset, void *buf,
+ int len, int write)
+{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ resource_size_t iomap = obj->mm.region->iomap.base -
+ obj->mm.region->region.start;
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long bytes_left = len;
+
+ /*
+ * TODO: For now just let it fail if the resource is non-mappable,
+ * otherwise we need to perform the memcpy from the gpu here, without
+ * interfering with the object (like moving the entire thing).
+ */
+ if (!i915_ttm_resource_mappable(bo->resource))
+ return -EIO;
+
+ offset -= page << PAGE_SHIFT;
+ do {
+ unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+ void __iomem *ptr;
+ dma_addr_t daddr;
+
+ daddr = i915_gem_object_get_dma_address(obj, page);
+ ptr = ioremap_wc(iomap + daddr + offset, bytes);
+ if (!ptr)
+ return -EIO;
+
+ if (write)
+ memcpy_toio(ptr, buf, bytes);
+ else
+ memcpy_fromio(buf, ptr, bytes);
+ iounmap(ptr);
+
+ page++;
+ buf += bytes;
+ bytes_left -= bytes;
+ offset = 0;
+ } while (bytes_left);
+
+ return len;
+}
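+
+/*
+ * Each page is ioremapped individually above since the backing pages need
+ * not be physically contiguous. Worked example with hypothetical numbers
+ * (4 KiB pages, len = 0x1800 at offset = 0x1900):
+ *
+ *   page = 0x1900 >> PAGE_SHIFT = 1, in-page offset = 0x900
+ *   pass 1: bytes = min(0x1800, 0x1000 - 0x900) = 0x700  (tail of page 1)
+ *   pass 2: bytes = min(0x1100, 0x1000)         = 0x1000 (all of page 2)
+ *   pass 3: bytes = min(0x0100, 0x1000)         = 0x100  (head of page 3)
+ *
+ * On success the function returns len, matching the vm_operations
+ * ->access() convention this callback ultimately serves (e.g. ptrace
+ * peeking at an LMEM-backed mapping).
+ */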
+
/*
* All callbacks need to take care not to downcast a struct ttm_buffer_object
* without checking its subclass, since it might be a TTM ghost object.
@@ -715,6 +759,7 @@ static struct ttm_device_funcs i915_ttm_bo_driver = {
.delete_mem_notify = i915_ttm_delete_mem_notify,
.io_mem_reserve = i915_ttm_io_mem_reserve,
.io_mem_pfn = i915_ttm_io_mem_pfn,
+ .access_memory = i915_ttm_access_memory,
};
/**
@@ -735,12 +780,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- int real_num_busy;
+ struct ttm_placement initial_placement;
+ struct ttm_place initial_place;
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = fetch_and_zero(&placement->num_busy_placement);
- ret = ttm_bo_validate(bo, placement, &ctx);
+ initial_placement.num_placement = 1;
+ memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+ initial_place.flags |= TTM_PL_FLAG_DESIRED;
+ initial_placement.placement = &initial_place;
+ ret = ttm_bo_validate(bo, &initial_placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
/*
@@ -755,14 +804,13 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_busy_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
@@ -779,8 +827,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(obj->mm.rsgt);
obj->mm.rsgt = rsgt;
- __i915_gem_object_set_pages(obj, &rsgt->table,
- i915_sg_dma_sizes(rsgt->table.sgl));
+ __i915_gem_object_set_pages(obj, &rsgt->table);
}
GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
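
The removed fetch_and_zero() trick and the new TTM_PL_FLAG_DESIRED copy
express the same two-phase strategy; in sketch form:

/*
 * phase 1: validate with only placement[0], flagged DESIRED
 *          -> must succeed without evicting anything
 * phase 2: validate the full list (FALLBACK entries included)
 *          -> may evict, and may land the object in a fallback region
 */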
@@ -790,13 +837,17 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
- struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+ struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
struct ttm_placement placement;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
- i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+ i915_ttm_placement_from_obj(obj, places, &placement);
return __i915_ttm_get_pages(obj, &placement);
}
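
Why -E2BIG here: sg_alloc_table() takes an unsigned int entry count, so the
object's page count must fit in 32 bits. In sketch form:

/*
 * With 4 KiB pages, UINT_MAX pages is (2^32 - 1) * 4 KiB, i.e. just
 * under 16 TiB; any object larger than that fails the overflows_type()
 * check (from <linux/overflow.h>) with -E2BIG before TTM is involved.
 */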
@@ -826,9 +877,7 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
obj->base.size, flags);
placement.num_placement = 1;
- placement.num_busy_placement = 1;
placement.placement = &requested;
- placement.busy_placement = &requested;
ret = __i915_ttm_get_pages(obj, &placement);
if (ret)
@@ -947,7 +996,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
- * portion. And likewise when we try to find space for a mappble
+ * portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
@@ -982,7 +1031,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -990,15 +1039,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref = 0;
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
- obj = i915_ttm_to_gem(bo);
- if (!obj)
- return VM_FAULT_SIGBUS;
-
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
@@ -1013,10 +1058,27 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (i915_ttm_cpu_maps_iomem(bo->resource))
- wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+ /*
+ * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+ * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+ * far as doing a ttm_bo_move_null(), which should skip all the other
+ * junk.
+ */
+ if (!bo->resource) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true, /* should be idle already */
+ };
+ int err;
- if (!i915_ttm_resource_mappable(bo->resource)) {
+ GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+ } else if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
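
Background for the !bo->resource branch above (summary, hedged): with
pipeline-gutting, eviction or suspend may free bo->resource while keeping the
swapped-out ttm_tt, so the fault handler first revalidates to system memory:

/*
 * ttm_bo_validate(bo, system) on a resource-less BO should reduce to a
 * ttm_bo_move_null(); the subsequent populate of the ttm_tt is what
 * actually brings the shmem pages back before any PTEs are installed.
 */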
@@ -1024,7 +1086,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct intel_memory_region *mr = obj->mm.placements[i];
unsigned int flags;
- if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
+ if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
continue;
flags = obj->flags;
@@ -1035,13 +1097,18 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
}
if (err) {
- drm_dbg(dev, "Unable to make resource CPU accessible\n");
+ drm_dbg_ratelimited(dev,
+ "Unable to make resource CPU accessible(err = %pe)\n",
+ ERR_PTR(err));
dma_resv_unlock(bo->base.resv);
ret = VM_FAULT_SIGBUS;
goto out_rpm;
}
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
@@ -1053,16 +1120,21 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
goto out_rpm;
- /* ttm_bo_vm_reserve() already has dma_resv_lock */
+ /*
+ * ttm_bo_vm_reserve() already has dma_resv_lock.
+ * userfault_count is protected by dma_resv lock and rpm wakeref.
+ */
if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
obj->userfault_count = 1;
- mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
- list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
- mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
+ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+
+ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
}
- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
+ intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
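
The intent of the reworked autosuspend check, with a worked example (assuming
the default CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND of 250 ms):

/*
 * Old: "wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND" bitwise-ANDs an
 * opaque wakeref cookie with 250, which is meaningless and can skip (or
 * spuriously take) the auto-suspend arming. New: test the two conditions
 * separately - a wakeref is actually held AND the timeout is non-zero.
 */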
@@ -1094,7 +1166,7 @@ static void ttm_vm_open(struct vm_area_struct *vma)
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
i915_gem_object_get(obj);
}
@@ -1103,7 +1175,7 @@ static void ttm_vm_close(struct vm_area_struct *vma)
struct drm_i915_gem_object *obj =
i915_ttm_to_gem(vma->vm_private_data);
- GEM_BUG_ON(!obj);
+ GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
i915_gem_object_put(obj);
}
@@ -1124,7 +1196,29 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ intel_wakeref_t wakeref = NULL;
+
+ assert_object_held_shared(obj);
+
+ if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
+ /* userfault_count is protected by obj lock and rpm wakeref. */
+ if (obj->userfault_count) {
+ spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ list_del(&obj->userfault_link);
+ spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ obj->userfault_count = 0;
+ }
+ }
+
+ GEM_WARN_ON(obj->userfault_count);
+
ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
}
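
Ordering sketch for the teardown above (summary of the code, not new logic):

/*
 * 1. intel_runtime_pm_get()  - keep the device awake across the unmap
 * 2. list_del() under lmem_userfault_lock, clear userfault_count
 * 3. ttm_bo_unmap_virtual()  - zap the userspace PTEs
 * 4. intel_runtime_pm_put()  - allow autosuspend again
 */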
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
@@ -1177,7 +1271,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
}
}
-/**
+/*
* __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
* @mem: The initial memory region for the object.
* @obj: The gem object.
@@ -1233,12 +1327,23 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
&i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
+
+ /*
+ * XXX: ttm_bo_init_reserved() returns -ENOSPC if the size is too big to
+ * add a vma; the function that actually returns -ENOSPC is
+ * drm_mm_insert_node_in_range(). To match other paths that return -E2BIG
+ * when the size is too large, convert -ENOSPC to -E2BIG here.
+ */
+ if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC)
+ ret = -E2BIG;
+
if (ret)
return i915_ttm_err_to_gem(ret);