author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2023-04-11 15:43:45 +0300
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2023-04-11 15:43:45 +0300
commit     ea68a3e9d14e9e0bf017d178fb4bd53b6deb1482 (patch)
tree       4ca0e37218ecc0844d330cb37b956d5219bb1d32 /drivers/gpu/drm/i915/gem
parent     16fc9c08f0ec7b1c95f1ea4a16097acdb3fc943d (diff)
parent     55bf14961db9da61220e6f04bc9919c94b1a6585 (diff)
Merge drm/drm-next into drm-intel-gt-next
Need to pull in a commit from drm-next (earlier in drm-intel-next):
1eca0778f4b3 ("drm/i915: add struct i915_dsm to wrap dsm members together")
In order to merge the following patch to drm-intel-gt-next:
https://patchwork.freedesktop.org/patch/530942/?series=114925&rev=6
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
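
For context, the drm-next commit being pulled in wraps the previously scattered fields (i915->dsm, i915->dsm_reserved, i915->stolen_usable_size) into a single struct, which is why the i915_gem_stolen.c hunks below access i915->dsm.stolen, i915->dsm.reserved and i915->dsm.usable_size. A minimal sketch of that wrapper, inferred from the field accesses in this diff rather than copied from the actual header, is:

/*
 * Approximate shape of the i915_dsm wrapper from 1eca0778f4b3, inferred
 * from the accesses in the hunks below; the real definition in the
 * pulled-in commit may differ in naming and documentation.
 */
#include <linux/ioport.h>

struct i915_dsm {
	/* Full Data Stolen Memory range */
	struct resource stolen;

	/* Reserved (WOPCM) range at the top of DSM, excluded from the allocator */
	struct resource reserved;

	/* Portion of DSM actually usable by the driver */
	resource_size_t usable_size;
};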
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c      |  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c       |  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c       |  1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c         |  6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h       |  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h |  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c       | 57
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c          | 38
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h          |  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c     | 60
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c       |  7
11 files changed, 126 insertions, 52 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b3b398fe689c..385ffc575b48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,6 +8,7 @@
 
 #include "display/intel_frontbuffer.h"
 
+#include "i915_config.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_sw_fence_work.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index f55dcc01d97f..bfe1dbda4cb7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -5,6 +5,7 @@
 
 #include <drm/drm_fourcc.h>
 
+#include "display/intel_display.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index b0b6cdb0b07f..d2d5a24301b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -4,6 +4,7 @@
  * Copyright © 2014-2016 Intel Corporation
  */
 
+#include "display/intel_display.h"
 #include "display/intel_frontbuffer.h"
 
 #include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d036ef2dcdba..3dbacdf0911a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -697,7 +697,7 @@ insert:
 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
 	if (file)
-		drm_vma_node_allow(&mmo->vma_node, file);
+		drm_vma_node_allow_once(&mmo->vma_node, file);
 	return mmo;
 
 err:
@@ -941,7 +941,7 @@ i915_gem_object_mmap(struct drm_i915_gem_object *obj,
 			i915_gem_object_put(obj);
 			return -EINVAL;
 		}
-		vma->vm_flags &= ~VM_MAYWRITE;
+		vm_flags_clear(vma, VM_MAYWRITE);
 	}
 
 	anon = mmap_singleton(to_i915(dev));
@@ -950,7 +950,7 @@ i915_gem_object_mmap(struct drm_i915_gem_object *obj,
 		return PTR_ERR(anon);
 	}
 
-	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
 
 	/*
 	 * We keep the ref on mmo->obj, not vm_file, but we require
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f9a8acbba715..885ccde9dc3c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -303,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-	return READ_ONCE(obj->frontbuffer);
+	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
 }
 
 static inline unsigned int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8540edff164b..830c11431ee8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -498,6 +498,9 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int cache_dirty:1;
 
+	/* @is_dpt: Object houses a display page table (DPT) */
+	unsigned int is_dpt:1;
+
 	/**
 	 * @read_domains: Read memory domains.
 	 *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 7ab9e98bcbd2..8ac376c24aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -110,9 +110,7 @@ static int adjust_stolen(struct drm_i915_private *i915,
 		else
 			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
 
-		ggtt_res =
-			(struct resource) DEFINE_RES_MEM(ggtt_start,
-							 ggtt_total_entries(ggtt) * 4);
+		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);
 
 		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
 			stolen[0].end = ggtt_res.start;
@@ -211,7 +209,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
 					IS_GM45(i915) ?
 					CTG_STOLEN_RESERVED :
 					ELK_STOLEN_RESERVED);
-	resource_size_t stolen_top = i915->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.stolen.end + 1;
 
 	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
 		IS_GM45(i915) ? "CTG" : "ELK", reg_val);
@@ -276,7 +274,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
 				    resource_size_t *size)
 {
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = i915->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.stolen.end + 1;
 
 	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -365,7 +363,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
 				    resource_size_t *size)
 {
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top = i915->dsm.end + 1;
+	resource_size_t stolen_top = i915->dsm.stolen.end + 1;
 
 	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
@@ -414,7 +412,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
 }
 
 /*
- * Initialize i915->dsm_reserved to contain the reserved space within the Data
+ * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range on the top of DSM that is reserved, not to
 * be used by driver, so must be excluded from the region passed to the
 * allocator later. In the spec this is also called as WOPCM.
@@ -430,7 +428,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
 	resource_size_t reserved_size;
 	int ret = 0;
 
-	stolen_top = i915->dsm.end + 1;
+	stolen_top = i915->dsm.stolen.end + 1;
 
 	reserved_base = stolen_top;
 	reserved_size = 0;
@@ -471,13 +469,12 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
 		goto bail_out;
 	}
 
-	i915->dsm_reserved =
-		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
+	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);
 
-	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
+	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
 		drm_err(&i915->drm,
 			"Stolen reserved area %pR outside stolen memory %pR\n",
-			&i915->dsm_reserved, &i915->dsm);
+			&i915->dsm.reserved, &i915->dsm.stolen);
 		ret = -EINVAL;
 		goto bail_out;
 	}
@@ -485,8 +482,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
 	return 0;
 
 bail_out:
-	i915->dsm_reserved =
-		(struct resource)DEFINE_RES_MEM(reserved_base, 0);
+	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);
 
 	return ret;
 }
@@ -517,27 +513,27 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
 	if (request_smem_stolen(i915, &mem->region))
 		return -ENOSPC;
 
-	i915->dsm = mem->region;
+	i915->dsm.stolen = mem->region;
 
 	if (init_reserved_stolen(i915))
 		return -ENOSPC;
 
 	/* Exclude the reserved region from driver use */
-	mem->region.end = i915->dsm_reserved.start - 1;
+	mem->region.end = i915->dsm.reserved.start - 1;
 	mem->io_size = min(mem->io_size, resource_size(&mem->region));
 
-	i915->stolen_usable_size = resource_size(&mem->region);
+	i915->dsm.usable_size = resource_size(&mem->region);
 
 	drm_dbg(&i915->drm,
 		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
-		(u64)resource_size(&i915->dsm) >> 10,
-		(u64)i915->stolen_usable_size >> 10);
+		(u64)resource_size(&i915->dsm.stolen) >> 10,
+		(u64)i915->dsm.usable_size >> 10);
 
-	if (i915->stolen_usable_size == 0)
+	if (i915->dsm.usable_size == 0)
 		return -ENOSPC;
 
 	/* Basic memrange allocator for stolen space. */
-	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
+	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
 
 	return 0;
 }
@@ -587,7 +583,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	struct sg_table *st;
 	struct scatterlist *sg;
 
-	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
 
 	/* We hide that we have no struct page backing our stolen object
 	 * by wrapping the contiguous physical allocation with a fake
@@ -607,7 +603,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	sg->offset = 0;
 	sg->length = size;
 
-	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
+	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
 	sg_dma_len(sg) = size;
 
 	return st;
@@ -894,8 +890,9 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
 	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
 		/*
 		 * MTL dsm size is in GGC register.
-		 * Also MTL uses offset to DSMBASE in ptes, so i915
-		 * uses dsm_base = 0 to setup stolen region.
+		 * Also MTL uses offset to GSMBASE in ptes, so i915
+		 * uses dsm_base = 8MBs to setup stolen region, since
+		 * DSMBASE = GSMBASE + 8MB.
 		 */
 		ret = mtl_get_gms_size(uncore);
 		if (ret < 0) {
@@ -903,11 +900,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
 			return ERR_PTR(ret);
 		}
 
-		dsm_base = 0;
+		dsm_base = SZ_8M;
 		dsm_size = (resource_size_t)(ret * SZ_1M);
 
 		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
-		GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
+		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
 	} else {
 		/* Use DSM base address instead for stolen memory */
 		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
@@ -916,14 +913,12 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
 		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
 	}
 
-	io_size = dsm_size;
-	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
-		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
-	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
 		io_start = 0;
 		io_size = 0;
 	} else {
 		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
+		io_size = dsm_size;
 	}
 
 	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e28c34d9dba9..9227f8146a58 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -274,8 +274,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 {
 	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
 						     bdev);
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->resource->mem_type);
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 	unsigned long ccs_pages = 0;
 	enum ttm_caching caching;
@@ -289,8 +287,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!i915_tt)
 		return NULL;
 
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
-	    man->use_tt)
+	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+	    ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
 		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 
 	caching = i915_ttm_select_tt_caching(obj);
@@ -474,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 	struct ttm_placement place = {};
 	int ret;
 
-	if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+	if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
 		return 0;
 
 	GEM_BUG_ON(!i915_tt->is_shmem);
@@ -513,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-	if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+	/*
+	 * This gets called twice by ttm, so long as we have a ttm resource or
+	 * ttm_tt then we can still safely call this. Due to pipeline-gutting,
+	 * we maybe have NULL bo->resource, but in that case we should always
+	 * have a ttm alive (like if the pages are swapped out).
+	 */
+	if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
 		__i915_gem_object_pages_fini(obj);
 		i915_ttm_free_cached_io_rsgt(obj);
 	}
@@ -1058,7 +1062,27 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	if (!i915_ttm_resource_mappable(bo->resource)) {
+	/*
+	 * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+	 * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+	 * far as far doing a ttm_bo_move_null(), which should skip all the
+	 * other junk.
+	 */
+	if (!bo->resource) {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = true,
+			.no_wait_gpu = true, /* should be idle already */
+		};
+		int err;
+
+		GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+		err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+		if (err) {
+			dma_resv_unlock(bo->base.resv);
+			return VM_FAULT_SIGBUS;
+		}
+	} else if (!i915_ttm_resource_mappable(bo->resource)) {
 		int err = -ENODEV;
 		int i;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
 static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
 {
 	/* Once / if we support GGTT, this is also false for cached ttm_tts */
-	return mem->mem_type != I915_PL_SYSTEM;
+	return mem && mem->mem_type != I915_PL_SYSTEM;
 }
 
 bool i915_ttm_resource_mappable(struct ttm_resource *res);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 46b398ccd250..69eb20ed4d47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 {
 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	unsigned int cache_level;
+	unsigned int mem_flags;
 	unsigned int i;
+	int mem_type;
+
+	/*
+	 * We might have been purged (or swapped out) if the resource is NULL,
+	 * in which case the SYSTEM placement is the closest match to describe
+	 * the current domain. If the object is ever used in this state then we
+	 * will require moving it again.
+	 */
+	if (!bo->resource) {
+		mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = I915_PL_SYSTEM;
+		cache_level = I915_CACHE_NONE;
+	} else {
+		mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+			I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = bo->resource->mem_type;
+		cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+						   bo->ttm);
+	}
 
 	/*
 	 * If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	 * in an allowable region, it's evicted and we don't update the
 	 * object region.
 	 */
-	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+	if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
 		for (i = 0; i < obj->mm.n_placements; ++i) {
 			struct intel_memory_region *mr = obj->mm.placements[i];
 
-			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+			if (intel_region_to_ttm_type(mr) == mem_type &&
 			    mr != obj->mm.region) {
 				i915_gem_object_release_memory_region(obj);
 				i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 	}
 
 	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+	obj->mem_flags |= mem_flags;
 
-	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
-		I915_BO_FLAG_STRUCT_PAGE;
-
-	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
-					   bo->ttm);
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 }
 
@@ -568,6 +584,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 		return 0;
 	}
 
+	if (!bo->resource) {
+		if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+			hop->mem_type = TTM_PL_SYSTEM;
+			hop->flags = TTM_PL_FLAG_TEMPORARY;
+			return -EMULTIHOP;
+		}
+
+		/*
+		 * This is only reached when first creating the object, or if
+		 * the object was purged or swapped out (pipeline-gutting). For
+		 * the former we can safely skip all of the below since we are
+		 * only using a dummy SYSTEM placement here. And with the latter
+		 * we will always re-enter here with bo->resource set correctly
+		 * (as per the above), since this is part of a multi-hop
+		 * sequence, where at the end we can do the move for real.
+		 *
+		 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+		 * which doens't require any kind of move, so it should be safe
+		 * to skip all the below and call ttm_bo_move_null() here, where
+		 * the caller in __i915_ttm_get_pages() will take care of the
+		 * rest, since we should have a valid ttm_tt.
+		 */
+		ttm_bo_move_null(bo, dst_mem);
+		return 0;
+	}
+
 	ret = i915_ttm_move_notify(bo);
 	if (ret)
 		return ret;
@@ -673,6 +715,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
 
 	assert_object_held(dst);
 	assert_object_held(src);
+
+	if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+		return -EINVAL;
+
 	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 
 	ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index be644763d3bb..ad649523d5e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
 	unsigned int flags;
 	int err = 0;
 
-	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+	if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
 		return 0;
 
 	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -186,7 +186,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
 		return err;
 
 	/* Content may have been swapped. */
-	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+	if (!backup_bo->resource)
+		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+	if (!err)
+		err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
 	if (!err) {
 		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
 					    false);
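
As a quick illustration of the MTL stolen-memory change in the i915_gem_stolen.c hunks above: PTEs carry offsets from GSMBASE while DSMBASE = GSMBASE + 8MB, so the stolen region now starts 8MB into the LMEM BAR and the CPU window follows dsm_base. The helper below is purely illustrative (not part of the patch) and ignores the small-BAR case where io_start/io_size are forced to 0.

#include <stdint.h>

#define SZ_1M 0x100000ULL
#define SZ_8M 0x800000ULL

struct stolen_window {
	uint64_t dsm_base; /* offset of DSM within the LMEM BAR */
	uint64_t dsm_size; /* stolen size from the GGC register, in bytes */
	uint64_t io_start; /* CPU-visible start of stolen memory */
	uint64_t io_size;  /* CPU-visible size of stolen memory */
};

/* Mirror of the post-patch math: dsm_base = 8M because DSMBASE = GSMBASE + 8M,
 * and the CPU window is the LMEM BAR offset by dsm_base. */
static struct stolen_window mtl_stolen_window(uint64_t lmem_bar_start,
					      uint64_t gms_size_mb)
{
	struct stolen_window w = {
		.dsm_base = SZ_8M,
		.dsm_size = gms_size_mb * SZ_1M,
	};

	w.io_start = lmem_bar_start + w.dsm_base;
	w.io_size = w.dsm_size;
	return w;
}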