Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_bo.c	883
1 file changed, 674 insertions(+), 209 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 1be2415966df..8422f3cab113 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -33,9 +33,11 @@
 #include "xe_pxp.h"
 #include "xe_res_cursor.h"
 #include "xe_shrinker.h"
+#include "xe_sriov_vf_ccs.h"
 #include "xe_trace_bo.h"
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
+#include "xe_vram_types.h"

 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
	[XE_PL_SYSTEM] = "system",
@@ -186,6 +188,8 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
		bo->placements[*c] = (struct ttm_place) {
			.mem_type = XE_PL_TT,
+			.flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
+				TTM_PL_FLAG_FALLBACK : 0,
		};
		*c += 1;
	}
@@ -198,6 +202,8 @@ static bool force_contiguous(u32 bo_flags)
	else if (bo_flags & XE_BO_FLAG_PINNED &&
		 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
		return true; /* needs vmap */
+	else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
+		return true;

	/*
	 * For eviction / restore on suspend / resume objects pinned in VRAM
@@ -819,8 +825,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		return ret;
	}

-	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
-			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+	tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));

	move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
					 (!mem_type_is_vram(old_mem_type) && !tt_has_data));
@@ -965,6 +970,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
	dma_fence_put(fence);
	xe_pm_runtime_put(xe);

+	/*
+	 * CCS meta data is migrated from TT -> SMEM. So, let us detach the
+	 * BBs from the BO as they are no longer needed.
+	 */
+	if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
+	    new_mem->mem_type == XE_PL_SYSTEM)
+		xe_sriov_vf_ccs_detach_bo(bo);
+
+	if (IS_VF_CCS_READY(xe) &&
+	    ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
+	     (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
+	    handle_system_ccs)
+		ret = xe_sriov_vf_ccs_attach_bo(bo);
+
 out:
	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
	    ttm_bo->ttm) {
@@ -975,6 +994,9 @@ out:
		if (timeout < 0)
			ret = timeout;

+		if (IS_VF_CCS_READY(xe))
+			xe_sriov_vf_ccs_detach_bo(bo);
+
		xe_tt_unmap_sg(xe, ttm_bo->ttm);
	}
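The TTM_PL_FLAG_FALLBACK flag added in the try_add_system() hunk above marks the TT placement so that ttm_bo_validate() only selects it once the non-fallback (VRAM) placements have failed. A minimal illustrative sketch of such a placement list; the helper name is hypothetical and not part of this patch:

/* Illustrative only: VRAM-first placements with TT as a fallback. */
static void example_vram_with_tt_fallback(struct ttm_place *places, u32 *n)
{
	places[0] = (struct ttm_place) {
		.mem_type = XE_PL_VRAM0,	/* preferred placement */
	};
	places[1] = (struct ttm_place) {
		.mem_type = XE_PL_TT,
		.flags = TTM_PL_FLAG_FALLBACK,	/* tried only after VRAM fails */
	};
	*n = 2;
}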
@@ -1119,42 +1141,47 @@ out_unref:
 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
 {
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
	struct xe_bo *backup;
	int ret = 0;

-	xe_bo_lock(bo, false);
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+		drm_exec_retry_on_contention(&exec);
+		xe_assert(xe, !ret);
+		xe_assert(xe, !bo->backup_obj);

-	xe_assert(xe, !bo->backup_obj);
+		/*
+		 * Since this is called from the PM notifier we might have raced with
+		 * someone unpinning this after we dropped the pinned list lock and
+		 * grabbing the above bo lock.
+		 */
+		if (!xe_bo_is_pinned(bo))
+			break;

-	/*
-	 * Since this is called from the PM notifier we might have raced with
-	 * someone unpinning this after we dropped the pinned list lock and
-	 * grabbing the above bo lock.
-	 */
-	if (!xe_bo_is_pinned(bo))
-		goto out_unlock_bo;
+		if (!xe_bo_is_vram(bo))
+			break;

-	if (!xe_bo_is_vram(bo))
-		goto out_unlock_bo;
+		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+			break;

-	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
-		goto out_unlock_bo;
+		backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
+					   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+					   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+					   XE_BO_FLAG_PINNED, &exec);
+		if (IS_ERR(backup)) {
+			drm_exec_retry_on_contention(&exec);
+			ret = PTR_ERR(backup);
+			xe_validation_retry_on_oom(&ctx, &ret);
+			break;
+		}

-	backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
-					DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
-					XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
-					XE_BO_FLAG_PINNED);
-	if (IS_ERR(backup)) {
-		ret = PTR_ERR(backup);
-		goto out_unlock_bo;
+		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+		ttm_bo_pin(&backup->ttm);
+		bo->backup_obj = backup;
	}

-	backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
-	ttm_bo_pin(&backup->ttm);
-	bo->backup_obj = backup;
-
-out_unlock_bo:
-	xe_bo_unlock(bo);
	return ret;
 }
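For readers new to the xe_validation_guard() construct this patch introduces throughout the file: the guard opens a validation transaction and runs its body; drm_exec_retry_on_contention() restarts the body on lock contention, xe_validation_retry_on_oom() restarts it after eviction when the error indicates out-of-memory, and a plain break leaves the transaction. A minimal sketch of a caller, using only calls visible in this diff (the function itself is hypothetical):

static int example_validate_one_bo(struct xe_device *xe, struct xe_bo *bo)
{
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);	/* restarts the body */
		if (ret)
			break;

		ret = xe_bo_validate(bo, NULL, false, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);	/* retries after eviction */
	}

	return ret;
}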
@@ -1180,57 +1207,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
	return 0;
 }

-/**
- * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
- * @bo: The buffer object to move.
- *
- * On successful completion, the object memory will be moved to system memory.
- *
- * This is needed to for special handling of pinned VRAM object during
- * suspend-resume.
- *
- * Return: 0 on success. Negative error code on failure.
- */
-int xe_bo_evict_pinned(struct xe_bo *bo)
+static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
 {
-	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
-	struct xe_bo *backup = bo->backup_obj;
-	bool backup_created = false;
+	struct xe_device *xe = xe_bo_device(bo);
	bool unmap = false;
	int ret = 0;

-	xe_bo_lock(bo, false);
-
-	if (WARN_ON(!bo->ttm.resource)) {
-		ret = -EINVAL;
-		goto out_unlock_bo;
-	}
-
-	if (WARN_ON(!xe_bo_is_pinned(bo))) {
-		ret = -EINVAL;
-		goto out_unlock_bo;
-	}
-
-	if (!xe_bo_is_vram(bo))
-		goto out_unlock_bo;
-
-	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
-		goto out_unlock_bo;
-
-	if (!backup) {
-		backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
-						NULL, xe_bo_size(bo),
-						DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
-						XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
-						XE_BO_FLAG_PINNED);
-		if (IS_ERR(backup)) {
-			ret = PTR_ERR(backup);
-			goto out_unlock_bo;
-		}
-		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
-		backup_created = true;
-	}
-
	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
		struct xe_migrate *migrate;
		struct dma_fence *fence;
@@ -1240,14 +1222,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
		else
			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);

+		xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
		if (ret)
			goto out_backup;

-		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
-		if (ret)
-			goto out_backup;
-
		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
					backup->ttm.resource, false);
		if (IS_ERR(fence)) {
@@ -1257,8 +1236,6 @@

		dma_resv_add_fence(bo->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
-		dma_resv_add_fence(backup->ttm.base.resv, fence,
-				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	} else {
		ret = xe_bo_vmap(backup);
@@ -1268,7 +1245,7 @@
		if (iosys_map_is_null(&bo->vmap)) {
			ret = xe_bo_vmap(bo);
			if (ret)
-				goto out_backup;
+				goto out_vunmap;
			unmap = true;
		}

@@ -1278,15 +1255,78 @@

	if (!bo->backup_obj)
		bo->backup_obj = backup;
-
-out_backup:
+out_vunmap:
	xe_bo_vunmap(backup);
-	if (ret && backup_created)
-		xe_bo_put(backup);
-out_unlock_bo:
+out_backup:
	if (unmap)
		xe_bo_vunmap(bo);
-	xe_bo_unlock(bo);
+
+	return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict_pinned(struct xe_bo *bo)
+{
+	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *backup = bo->backup_obj;
+	bool backup_created = false;
+	int ret = 0;
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+		drm_exec_retry_on_contention(&exec);
+		xe_assert(xe, !ret);
+
+		if (WARN_ON(!bo->ttm.resource)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (WARN_ON(!xe_bo_is_pinned(bo))) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!xe_bo_is_vram(bo))
+			break;
+
+		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+			break;
+
+		if (!backup) {
+			backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
+						   xe_bo_size(bo),
+						   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+						   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+						   XE_BO_FLAG_PINNED, &exec);
+			if (IS_ERR(backup)) {
+				drm_exec_retry_on_contention(&exec);
+				ret = PTR_ERR(backup);
+				xe_validation_retry_on_oom(&ctx, &ret);
+				break;
+			}
+			backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+			backup_created = true;
+		}
+
+		ret = xe_bo_evict_pinned_copy(bo, backup);
+	}
+
+	if (ret && backup_created)
+		xe_bo_put(backup);
+
	return ret;
 }

@@ -1336,10 +1376,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
		if (ret)
			goto out_unlock_bo;

-		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
-		if (ret)
-			goto out_unlock_bo;
-
		fence = xe_migrate_copy(migrate, backup, bo,
					backup->ttm.resource,
					bo->ttm.resource, false);
@@ -1350,8 +1386,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)

		dma_resv_add_fence(bo->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
-		dma_resv_add_fence(backup->ttm.base.resv, fence,
-				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	} else {
		ret = xe_bo_vmap(backup);
@@ -1502,9 +1536,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)

 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
 {
+	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+
	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

+	if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
+		xe_sriov_vf_ccs_detach_bo(bo);
+
	/*
	 * Object is idle and about to be destroyed. Release the
	 * dma-buf attachment.
@@ -1686,50 +1725,246 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
	}
 }

-static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+	/*
+	 * NOTE: The following atomic checks are platform-specific. For example,
+	 * if a device supports CXL atomics, these may not be necessary or
+	 * may behave differently.
+	 */
+
+	return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
+	       bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
+}
+
+/* Populate the bo if swapped out, or migrate if the access mode requires that. */
+static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
+			       struct drm_exec *exec)
+{
+	struct ttm_buffer_object *tbo = &bo->ttm;
+	int err = 0;
+
+	if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
+		xe_assert(xe_bo_device(bo),
+			  dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) ||
+			  (tbo->ttm && ttm_tt_is_populated(tbo->ttm)));
+		err = ttm_bo_populate(&bo->ttm, ctx);
+	} else if (should_migrate_to_smem(bo)) {
+		xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
+		err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
+	}
+
+	return err;
+}
+
+/* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
+static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
+{
+	vm_fault_t ret;
+
+	trace_xe_bo_cpu_fault(bo);
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT);
+	/*
+	 * When TTM is actually called to insert PTEs, ensure no blocking conditions
+	 * remain, in which case TTM may drop locks and return VM_FAULT_RETRY.
+	 */
+	xe_assert(xe, ret != VM_FAULT_RETRY);
+
+	if (ret == VM_FAULT_NOPAGE &&
+	    mem_type_is_vram(bo->ttm.resource->mem_type)) {
+		mutex_lock(&xe->mem_access.vram_userfault.lock);
+		if (list_empty(&bo->vram_userfault_link))
+			list_add(&bo->vram_userfault_link,
+				 &xe->mem_access.vram_userfault.list);
+		mutex_unlock(&xe->mem_access.vram_userfault.lock);
+	}
+
+	return ret;
+}
+
+static vm_fault_t xe_err_to_fault_t(int err)
+{
+	switch (err) {
+	case 0:
+	case -EINTR:
+	case -ERESTARTSYS:
+	case -EAGAIN:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+	case -ENOSPC:
+		return VM_FAULT_OOM;
+	default:
+		break;
+	}
+	return VM_FAULT_SIGBUS;
+}
+
+static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
+{
+	dma_resv_assert_held(tbo->base.resv);
+
+	return tbo->ttm &&
+		(tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
+		TTM_TT_FLAG_EXTERNAL;
+}
+
+static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
+					   struct xe_bo *bo, bool needs_rpm)
+{
+	struct ttm_buffer_object *tbo = &bo->ttm;
+	vm_fault_t ret = VM_FAULT_RETRY;
+	struct xe_validation_ctx ctx;
+	struct ttm_operation_ctx tctx = {
+		.interruptible = true,
+		.no_wait_gpu = true,
+		.gfp_retry_mayfail = true,
+	};
+	int err;
+
+	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+		return VM_FAULT_RETRY;
+
+	err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
+				     (struct xe_val_flags) {
+					     .interruptible = true,
+					     .no_block = true
+				     });
+	if (err)
+		goto out_pm;
+
+	if (!dma_resv_trylock(tbo->base.resv))
+		goto out_validation;
+
+	if (xe_ttm_bo_is_imported(tbo)) {
+		ret = VM_FAULT_SIGBUS;
+		drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+		goto out_unlock;
+	}
+
+	err = xe_bo_fault_migrate(bo, &tctx, NULL);
+	if (err) {
+		/* Return VM_FAULT_RETRY on these errors. */
+		if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
+			ret = xe_err_to_fault_t(err);
+		goto out_unlock;
+	}
+
+	if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
+		ret = __xe_bo_cpu_fault(vmf, xe, bo);
+
+out_unlock:
+	dma_resv_unlock(tbo->base.resv);
+out_validation:
+	xe_validation_ctx_fini(&ctx);
+out_pm:
+	if (needs_rpm)
+		xe_pm_runtime_put(xe);
+
+	return ret;
+}
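The fastpath above never blocks: it trylocks the reservation, forbids GPU waits and returns VM_FAULT_RETRY whenever it would have to sleep. The slowpath below then relies on the core-mm retry protocol, sketched here as an illustration only; note (as the code below does with xe_bo_get()) that a reference must be held on the backing object across the mmap_read_unlock():

static vm_fault_t example_retry_protocol(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;	/* caller forbids sleeping */
		/* Drop the mmap lock, resolve blocking waits, then ... */
		mmap_read_unlock(vmf->vma->vm_mm);
		return VM_FAULT_RETRY;		/* core mm re-issues the fault */
	}

	/* Retrying is not allowed; the fault must be resolved in place. */
	return VM_FAULT_NOPAGE;
}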
+
+static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
 {
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct drm_device *ddev = tbo->base.dev;
	struct xe_device *xe = to_xe_device(ddev);
	struct xe_bo *bo = ttm_to_xe_bo(tbo);
	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
+	bool retry_after_wait = false;
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
	vm_fault_t ret;
+	int err = 0;
	int idx;

-	if (needs_rpm)
-		xe_pm_runtime_get(xe);
+	if (!drm_dev_enter(&xe->drm, &idx))
+		return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);

-	ret = ttm_bo_vm_reserve(tbo, vmf);
-	if (ret)
+	ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+	if (ret != VM_FAULT_RETRY)
		goto out;

-	if (drm_dev_enter(ddev, &idx)) {
-		trace_xe_bo_cpu_fault(bo);
-
-		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-					       TTM_BO_VM_NUM_PREFAULT);
-		drm_dev_exit(idx);
+	if (fault_flag_allow_retry_first(vmf->flags)) {
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out;
+		retry_after_wait = true;
+		xe_bo_get(bo);
+		mmap_read_unlock(vmf->vma->vm_mm);
	} else {
-		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+		ret = VM_FAULT_NOPAGE;
	}

-	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-		goto out;
	/*
-	 * ttm_bo_vm_reserve() already has dma_resv_lock.
+	 * The fastpath failed and we were not required to return and retry immediately.
+	 * We're now running in one of two modes:
+	 *
+	 * 1) retry_after_wait == true: The mmap_read_lock() is dropped, and we're trying
+	 * to resolve blocking waits. But we can't resolve the fault since the
+	 * mmap_read_lock() is dropped. After retrying the fault, the aim is that the fastpath
+	 * should succeed. But it may fail since we drop the bo lock.
+	 *
+	 * 2) retry_after_wait == false: The fastpath failed, typically even after
+	 * a retry. Do whatever's necessary to resolve the fault.
+	 *
+	 * This construct is recommended to avoid excessive waits under the mmap_lock.
	 */
-	if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
-		mutex_lock(&xe->mem_access.vram_userfault.lock);
-		if (list_empty(&bo->vram_userfault_link))
-			list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
-		mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
+	if (needs_rpm)
+		xe_pm_runtime_get(xe);
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+			    err) {
+		struct ttm_operation_ctx tctx = {
+			.interruptible = true,
+			.no_wait_gpu = false,
+			.gfp_retry_mayfail = retry_after_wait,
+		};
+		long lerr;
+
+		err = drm_exec_lock_obj(&exec, &tbo->base);
+		drm_exec_retry_on_contention(&exec);
+		if (err)
+			break;
+
+		if (xe_ttm_bo_is_imported(tbo)) {
+			err = -EFAULT;
+			drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+			break;
+		}
+
+		err = xe_bo_fault_migrate(bo, &tctx, &exec);
+		if (err) {
+			drm_exec_retry_on_contention(&exec);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
+
+		lerr = dma_resv_wait_timeout(tbo->base.resv,
+					     DMA_RESV_USAGE_KERNEL, true,
+					     MAX_SCHEDULE_TIMEOUT);
+		if (lerr < 0) {
+			err = lerr;
+			break;
+		}
+
+		if (!retry_after_wait)
+			ret = __xe_bo_cpu_fault(vmf, xe, bo);
	}
+	/* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
+	if (err && !retry_after_wait)
+		ret = xe_err_to_fault_t(err);

-	dma_resv_unlock(tbo->base.resv);
-out:
	if (needs_rpm)
		xe_pm_runtime_put(xe);

+	if (retry_after_wait)
+		xe_bo_put(bo);
+out:
+	drm_dev_exit(idx);
+
	return ret;
 }

@@ -1773,7 +2008,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
 }

 static const struct vm_operations_struct xe_gem_vm_ops = {
-	.fault = xe_gem_fault,
+	.fault = xe_bo_cpu_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = xe_bo_vm_access,
@@ -1821,11 +2056,32 @@ void xe_bo_free(struct xe_bo *bo)
	kfree(bo);
 }

-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
-				     struct xe_tile *tile, struct dma_resv *resv,
-				     struct ttm_lru_bulk_move *bulk, size_t size,
-				     u16 cpu_caching, enum ttm_bo_type type,
-				     u32 flags)
+/**
+ * xe_bo_init_locked() - Initialize or create an xe_bo.
+ * @xe: The xe device.
+ * @bo: An already allocated buffer object or NULL
+ * if the function should allocate a new one.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
+ * or NULL for the xe_bo to use its own.
+ * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The cpu caching used for system memory backing store.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Initialize or create an xe buffer object. On failure, any allocated buffer
+ * object passed in @bo will have been unreferenced.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+				struct xe_tile *tile, struct dma_resv *resv,
+				struct ttm_lru_bulk_move *bulk, size_t size,
+				u16 cpu_caching, enum ttm_bo_type type,
+				u32 flags, struct drm_exec *exec)
 {
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
@@ -1894,6 +2150,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
		ctx.resv = resv;
	}

+	xe_validation_assert_exec(xe, exec, &bo->ttm.base);
	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
		if (WARN_ON(err)) {
@@ -1995,7 +2252,7 @@ __xe_bo_create_locked(struct xe_device *xe,
		      struct xe_tile *tile, struct xe_vm *vm,
		      size_t size, u64 start, u64 end,
		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
-		      u64 alignment)
+		      u64 alignment, struct drm_exec *exec)
 {
	struct xe_bo *bo = NULL;
	int err;
@@ -2016,11 +2273,11 @@ __xe_bo_create_locked(struct xe_device *xe,
		}
	}

-	bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
-				    vm && !xe_vm_in_fault_mode(vm) &&
-				    flags & XE_BO_FLAG_USER ?
-				    &vm->lru_bulk_move : NULL, size,
-				    cpu_caching, type, flags);
+	bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
+			       vm && !xe_vm_in_fault_mode(vm) &&
+			       flags & XE_BO_FLAG_USER ?
+			       &vm->lru_bulk_move : NULL, size,
+			       cpu_caching, type, flags, exec);
	if (IS_ERR(bo))
		return bo;

@@ -2054,9 +2311,10 @@ __xe_bo_create_locked(struct xe_device *xe,

		if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
			err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
-						   start + xe_bo_size(bo), U64_MAX);
+						   start + xe_bo_size(bo), U64_MAX,
+						   exec);
		} else {
-			err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+			err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
		}
		if (err)
			goto err_unlock_put_bo;
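Note how the suspend/resume paths above create the backup object with the parent bo's reservation object. Sharing one resv means a single lock protects both objects and one dma_resv_reserve_fences()/dma_resv_add_fence() covers the copy, which is why the backup-specific fence calls could be deleted. A hedged sketch of that idiom, mirroring the call made in xe_bo_notifier_prepare_pinned() (the wrapper name is hypothetical):

/* Create a CPU-accessible shadow bo sharing @bo's reservation object. */
static struct xe_bo *example_create_shadow(struct xe_device *xe, struct xe_bo *bo,
					   struct drm_exec *exec)
{
	return xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
				 xe_bo_size(bo), DRM_XE_GEM_CPU_CACHING_WB,
				 ttm_bo_type_kernel,
				 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
				 XE_BO_FLAG_PINNED, exec);
}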
@@ -2073,82 +2331,166 @@ err_unlock_put_bo:
	return ERR_PTR(err);
 }

-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
-			  struct xe_tile *tile, struct xe_vm *vm,
-			  size_t size, u64 start, u64 end,
-			  enum ttm_bo_type type, u32 flags, u64 alignment)
-{
-	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
-				     flags, alignment);
-}
-
+/**
+ * xe_bo_create_locked() - Create a BO
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Create a locked xe BO with no range or alignment restrictions.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
-				  enum ttm_bo_type type, u32 flags)
+				  enum ttm_bo_type type, u32 flags,
+				  struct drm_exec *exec)
 {
	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
-				     flags, 0);
+				     flags, 0, exec);
 }

-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
-				struct xe_vm *vm, size_t size,
-				u16 cpu_caching,
-				u32 flags)
+static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
+				       size_t size, u16 cpu_caching,
+				       enum ttm_bo_type type, u32 flags,
+				       u64 alignment, bool intr)
 {
-	struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
-						 cpu_caching, ttm_bo_type_device,
-						 flags | XE_BO_FLAG_USER, 0);
-	if (!IS_ERR(bo))
-		xe_bo_unlock_vm_held(bo);
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *bo;
+	int ret = 0;

-	return bo;
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+			    ret) {
+		bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
+					   cpu_caching, type, flags, alignment, &exec);
+		drm_exec_retry_on_contention(&exec);
+		if (IS_ERR(bo)) {
+			ret = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &ret);
+		} else {
+			xe_bo_unlock(bo);
+		}
+	}
+
+	return ret ? ERR_PTR(ret) : bo;
 }

-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
-			   struct xe_vm *vm, size_t size,
-			   enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_user() - Create a user BO
+ * @xe: The xe device.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The caching mode to be used for system backing store.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
+ * if such a transaction should be initiated by the call.
+ *
+ * Create a bo on behalf of user-space.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_user(struct xe_device *xe,
+				struct xe_vm *vm, size_t size,
+				u16 cpu_caching,
+				u32 flags, struct drm_exec *exec)
 {
-	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+	struct xe_bo *bo;
+
+	flags |= XE_BO_FLAG_USER;

-	if (!IS_ERR(bo))
-		xe_bo_unlock_vm_held(bo);
+	if (vm || exec) {
+		xe_assert(xe, exec);
+		bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
+					   cpu_caching, ttm_bo_type_device,
+					   flags, 0, exec);
+		if (!IS_ERR(bo))
+			xe_bo_unlock_vm_held(bo);
+	} else {
+		bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
+				       ttm_bo_type_device, flags, 0, true);
+	}

	return bo;
 }

-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
-				      struct xe_vm *vm,
-				      size_t size, u64 offset,
-				      enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @start: Start of fixed VRAM range or 0.
+ * @end: End of fixed VRAM range or ~0ULL.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ *
+ * Create and pin an Xe BO with range options. If @start and @end indicate
+ * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
+ * only.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+					  size_t size, u64 start, u64 end,
+					  enum ttm_bo_type type, u32 flags)
 {
-	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
-					       type, flags, 0);
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *bo;
+	int err = 0;
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+		bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
+					   0, type, flags, 0, &exec);
+		if (IS_ERR(bo)) {
+			drm_exec_retry_on_contention(&exec);
+			err = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
+
+		err = xe_bo_pin(bo, &exec);
+		xe_bo_unlock(bo);
+		if (err) {
+			xe_bo_put(bo);
+			drm_exec_retry_on_contention(&exec);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
+	}
+
+	return err ? ERR_PTR(err) : bo;
 }

-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
-					      struct xe_tile *tile,
-					      struct xe_vm *vm,
-					      size_t size, u64 offset,
-					      enum ttm_bo_type type, u32 flags,
-					      u64 alignment)
+static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+						     struct xe_tile *tile,
+						     struct xe_vm *vm,
+						     size_t size, u64 offset,
+						     enum ttm_bo_type type, u32 flags,
+						     u64 alignment, struct drm_exec *exec)
 {
	struct xe_bo *bo;
	int err;
	u64 start = offset == ~0ull ? 0 : offset;
-	u64 end = offset == ~0ull ? offset : start + size;
+	u64 end = offset == ~0ull ? ~0ull : start + size;

	if (flags & XE_BO_FLAG_STOLEN &&
	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
		flags |= XE_BO_FLAG_GGTT;

-	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
-				       alignment);
+	bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+				   flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+				   alignment, exec);
	if (IS_ERR(bo))
		return bo;

-	err = xe_bo_pin(bo);
+	err = xe_bo_pin(bo, exec);
	if (err)
		goto err_put;

@@ -2168,11 +2510,100 @@ err_put:
	return ERR_PTR(err);
 }

+/**
+ * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @offset: Optional VRAM offset or %~0ull for don't care.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @alignment: GGTT alignment.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
+ * options. The bo will be external and not associated with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+			     size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
+			     u64 alignment, bool intr)
+{
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *bo;
+	int ret = 0;
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+			    ret) {
+		bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
+						     type, flags, alignment, &exec);
+		if (IS_ERR(bo)) {
+			drm_exec_retry_on_contention(&exec);
+			ret = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &ret);
+		}
+	}
+
+	return ret ? ERR_PTR(ret) : bo;
+}
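The *_novm creation helpers above wrap the whole create (and pin) sequence in their own validation transaction, so callers that are not already inside a drm_exec transaction, typically driver init paths, need no boilerplate. An illustrative call, with a hypothetical size and flag choice:

/* Hypothetical boot-time allocation: pinned and mapped kernel bo. */
static struct xe_bo *example_alloc_kernel_bo(struct xe_device *xe, struct xe_tile *tile)
{
	return xe_bo_create_pin_map_at_novm(xe, tile, SZ_64K, ~0ull,
					    ttm_bo_type_kernel,
					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					    XE_BO_FLAG_GGTT,
					    0, true /* intr */);
}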
+
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo attached to @vm's reservation object.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
-				   enum ttm_bo_type type, u32 flags)
+				   enum ttm_bo_type type, u32 flags,
+				   struct drm_exec *exec)
+{
+	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
+					       0, exec);
+}
+
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+					size_t size, enum ttm_bo_type type, u32 flags,
+					bool intr)
 {
-	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+	return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
 }

 static void __xe_bo_unpin_map_no_vm(void *arg)
@@ -2187,8 +2618,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
	int ret;

	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
-	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+	bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
	if (IS_ERR(bo))
		return bo;

@@ -2199,6 +2629,11 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
	return bo;
 }

+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+	devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
+}
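xe_managed_bo_create_pin_map() ties the bo's unpin/unmap to a devm action; the new xe_managed_bo_unpin_map_no_vm() above lets a caller run that action early instead of waiting for device teardown. An illustrative pairing (hypothetical caller, minimal error handling, flag choice assumed):

static int example_managed_bo(struct xe_device *xe, struct xe_tile *tile)
{
	struct xe_bo *bo;

	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4K, XE_BO_FLAG_SYSTEM);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/* ... use the bo, e.g. through bo->vmap ... */

	xe_managed_bo_unpin_map_no_vm(bo);	/* runs the devm action now */
	return 0;
}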
+
 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags)
 {
@@ -2270,6 +2705,8 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 /**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
+ * @in_place: Pin in current placement, don't attempt to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
@@ -2277,7 +2714,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 *
 * Returns 0 for success, negative error code otherwise.
 */
-int xe_bo_pin_external(struct xe_bo *bo)
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
 {
	struct xe_device *xe = xe_bo_device(bo);
	int err;
@@ -2286,9 +2723,11 @@ int xe_bo_pin_external(struct xe_bo *bo)
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
-		err = xe_bo_validate(bo, NULL, false);
-		if (err)
-			return err;
+		if (!in_place) {
+			err = xe_bo_validate(bo, NULL, false, exec);
+			if (err)
+				return err;
+		}

		spin_lock(&xe->pinned.lock);
		list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
@@ -2308,7 +2747,17 @@ int xe_bo_pin_external(struct xe_bo *bo)
	return 0;
 }

-int xe_bo_pin(struct xe_bo *bo)
+/**
+ * xe_bo_pin() - Pin a kernel bo after potentially migrating it
+ * @bo: The kernel bo to pin.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Attempts to migrate a bo to @bo->placement. If that succeeds,
+ * pins the bo.
+ *
+ * Return: %0 on success, negative error code on migration failure.
+ */
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
 {
	struct ttm_place *place = &bo->placements[0];
	struct xe_device *xe = xe_bo_device(bo);
@@ -2330,7 +2779,7 @@ int xe_bo_pin(struct xe_bo *bo)
	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

-	err = xe_bo_validate(bo, NULL, false);
+	err = xe_bo_validate(bo, NULL, false, exec);
	if (err)
		return err;
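The new @in_place argument to xe_bo_pin_external() above skips the migrating xe_bo_validate() call and pins the object in whatever placement it currently has. A plausible user, stated here as an assumption since the dma-buf side is not part of this hunk, is a dma-buf pin callback where the exporter has already placed and locked the object:

/* Hypothetical: pin an already-placed exported bo without migrating it. */
static int example_pin_exported(struct xe_bo *bo, struct drm_exec *exec)
{
	return xe_bo_pin_external(bo, true /* in_place */, exec);
}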
@@ -2423,6 +2872,7 @@ void xe_bo_unpin(struct xe_bo *bo)
 * NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 * reservation object.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction shares
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+		   struct drm_exec *exec)
 {
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
@@ -2441,6 +2892,9 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
	};
	int ret;

+	if (xe_bo_is_pinned(bo))
+		return 0;
+
	if (vm) {
		lockdep_assert_held(&vm->lock);
		xe_vm_assert_held(vm);
@@ -2451,6 +2905,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)

	xe_vm_set_validating(vm, allow_res_evict);
	trace_xe_bo_validate(bo);
+	xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
	ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
	xe_vm_clear_validating(vm, allow_res_evict);

@@ -2646,8 +3101,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
	struct xe_vm *vm = NULL;
-	ktime_t end = 0;
	struct xe_bo *bo;
	unsigned int bo_flags;
	u32 handle;
@@ -2721,25 +3177,26 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
		return -ENOENT;
	}

-retry:
-	if (vm) {
-		err = xe_vm_lock(vm, true);
-		if (err)
-			goto out_vm;
+	err = 0;
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+			    err) {
+		if (vm) {
+			err = xe_vm_drm_exec_lock(vm, &exec);
+			drm_exec_retry_on_contention(&exec);
+			if (err)
+				break;
+		}
+
+		bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
+				       bo_flags, &exec);
+		drm_exec_retry_on_contention(&exec);
+		if (IS_ERR(bo)) {
+			err = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
	}
-
-	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
-			       bo_flags);
-
-	if (vm)
-		xe_vm_unlock(vm);
-
-	if (IS_ERR(bo)) {
-		err = PTR_ERR(bo);
-		if (xe_vm_validate_should_retry(NULL, err, &end))
-			goto retry;
+	if (err)
		goto out_vm;
-	}

	if (args->extensions) {
		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
@@ -2888,6 +3345,9 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
+ * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
+ * a default interruptible ctx is to be used.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
@@ -2899,7 +3359,8 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+		  struct drm_exec *exec)
 {
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
@@ -2911,6 +3372,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
	struct ttm_place requested;

	xe_bo_assert_held(bo);
+	tctx = tctx ? tctx : &ctx;

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;
@@ -2937,19 +3399,22 @@
		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

-	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
+	if (!tctx->no_wait_gpu)
+		xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
+	return ttm_bo_validate(&bo->ttm, &placement, tctx);
 }

 /**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
-int xe_bo_evict(struct xe_bo *bo)
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
 {
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
@@ -3109,11 +3574,11 @@ int xe_bo_dumb_create(struct drm_file *file_priv,

	args->size = ALIGN(mul_u32_u32(args->pitch, args->height), page_size);

-	bo = xe_bo_create_user(xe, NULL, NULL, args->size,
+	bo = xe_bo_create_user(xe, NULL, args->size,
			       DRM_XE_GEM_CPU_CACHING_WC,
			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			       XE_BO_FLAG_SCANOUT |
-			       XE_BO_FLAG_NEEDS_CPU_ACCESS);
+			       XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
	if (IS_ERR(bo))
		return PTR_ERR(bo);
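xe_bo_migrate() now accepts an optional ttm_operation_ctx, which is what lets the CPU-fault fastpath earlier in this patch attempt a strictly non-blocking migration. A sketch of such a caller, assuming the bo is already locked as the kernel-doc above requires:

/* Try a non-blocking migration to system memory; may fail with -EBUSY. */
static int example_try_migrate_nonblocking(struct xe_bo *bo)
{
	struct ttm_operation_ctx tctx = {
		.interruptible = true,
		.no_wait_gpu = true,
		.gfp_retry_mayfail = true,
	};

	/* A NULL exec is acceptable here since no_wait_gpu avoids eviction waits. */
	return xe_bo_migrate(bo, XE_PL_TT, &tctx, NULL);
}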