Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 441 |
1 file changed, 352 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3adaa4670103..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,14 +36,122 @@
 #include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
 #include "amdgpu_dma_buf.h"
 #include "amdgpu_hmm.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
 
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+			   uint64_t syncobj_handles_array,
+			   uint32_t num_syncobj_handles)
+{
+	struct dma_fence *fence;
+	uint32_t *syncobj_handles;
+	int ret, i;
+
+	if (!num_syncobj_handles)
+		return 0;
+
+	syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+				      size_mul(sizeof(uint32_t), num_syncobj_handles));
+	if (IS_ERR(syncobj_handles))
+		return PTR_ERR(syncobj_handles);
+
+	for (i = 0; i < num_syncobj_handles; i++) {
+
+		if (!syncobj_handles[i]) {
+			ret = -EINVAL;
+			goto free_memdup;
+		}
+
+		ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+		if (ret)
+			goto free_memdup;
+
+		dma_fence_wait(fence, false);
+
+		/* TODO: optimize async handling */
+		dma_fence_put(fence);
+	}
+
+free_memdup:
+	kfree(syncobj_handles);
+	return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+				uint32_t syncobj_handle,
+				uint64_t point,
+				struct drm_syncobj **syncobj,
+				struct dma_fence_chain **chain)
+{
+	if (!syncobj_handle)
+		return 0;
+
+	/* Find the sync object */
+	*syncobj = drm_syncobj_find(filp, syncobj_handle);
+	if (!*syncobj)
+		return -ENOENT;
+
+	if (!point)
+		return 0;
+
+	/* Allocate the chain node */
+	*chain = dma_fence_chain_alloc();
+	if (!*chain) {
+		drm_syncobj_put(*syncobj);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+			     struct amdgpu_bo_va *bo_va,
+			     uint32_t operation,
+			     uint64_t point,
+			     struct dma_fence *fence,
+			     struct drm_syncobj *syncobj,
+			     struct dma_fence_chain *chain)
+{
+	struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	struct dma_fence *last_update;
+
+	if (!syncobj)
+		return;
+
+	/* Find the last update fence */
+	switch (operation) {
+	case AMDGPU_VA_OP_MAP:
+	case AMDGPU_VA_OP_REPLACE:
+		if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+			last_update = vm->last_update;
+		else
+			last_update = bo_va->last_pt_update;
+		break;
+	case AMDGPU_VA_OP_UNMAP:
+	case AMDGPU_VA_OP_CLEAR:
+		last_update = fence;
+		break;
+	default:
+		return;
+	}
+
+	/* Add fence to timeline */
+	if (!point)
+		drm_syncobj_replace_fence(syncobj, last_update);
+	else
+		drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
 
 static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 {
@@ -87,12 +195,10 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = {
 
 static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
 
-	if (robj) {
-		amdgpu_hmm_unregister(robj);
-		amdgpu_bo_unref(&robj);
-	}
+	amdgpu_hmm_unregister(aobj);
+	ttm_bo_fini(&aobj->tbo);
 }
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -126,7 +232,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 
 	bo = &ubo->bo;
 	*obj = &bo->tbo.base;
-	(*obj)->funcs = &amdgpu_gem_object_funcs;
 
 	return 0;
 }
@@ -175,18 +280,28 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	    !amdgpu_vm_is_bo_always_valid(vm, abo))
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
+	amdgpu_vm_bo_update_shared(abo);
 	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va)
 		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	else
 		++bo_va->ref_count;
+
+	/* attach gfx eviction fence */
+	r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+	if (r) {
+		DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+		amdgpu_bo_unreserve(abo);
+		return r;
+	}
+
 	amdgpu_bo_unreserve(abo);
 
 	/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -202,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	 */
 	if (!vm->is_compute_context || !vm->process_info)
 		return 0;
 
-	if (!obj->import_attach ||
+	if (!drm_gem_is_imported(obj) ||
 	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
 		return 0;
 
 	mutex_lock_nested(&vm->process_info->lock, 1);
@@ -214,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
 		if (ti) {
-			dev_warn(adev->dev, "pid %d\n", ti->pid);
+			dev_warn(adev->dev, "pid %d\n", ti->task.pid);
 			amdgpu_vm_put_task_info(ti);
 		}
 	}
@@ -250,11 +365,15 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		goto out_unlock;
 	}
 
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+		amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
 	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (!bo_va || --bo_va->ref_count)
 		goto out_unlock;
 
 	amdgpu_vm_bo_del(adev, bo_va);
+	amdgpu_vm_bo_update_shared(bo);
 	if (!amdgpu_vm_ready(vm))
 		goto out_unlock;
 
@@ -295,7 +414,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
 	return drm_gem_ttm_mmap(obj, vma);
 }
 
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
 	.free = amdgpu_gem_object_free,
 	.open = amdgpu_gem_object_open,
 	.close = amdgpu_gem_object_close,
@@ -323,19 +442,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle, initial_domain;
 	int r;
 
-	/* reject DOORBELLs until userspace code to use it is available */
-	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
-		return -EINVAL;
-
 	/* reject invalid gem flags */
-	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
-		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
-		      AMDGPU_GEM_CREATE_ENCRYPTED |
-		      AMDGPU_GEM_CREATE_DISCARDABLE))
+	if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
 		return -EINVAL;
 
 	/* reject invalid gem domains */
@@ -347,6 +455,12 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	/* always clear VRAM */
+	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
+	if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+		return -EINVAL;
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
 				AMDGPU_GEM_DOMAIN_OA)) {
@@ -417,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct drm_gem_object *gobj;
-	struct hmm_range *range;
+	struct amdgpu_hmm_range *range;
 	struct amdgpu_bo *bo;
 	uint32_t handle;
 	int r;
@@ -458,15 +572,20 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		goto release_object;
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-						 &range);
-		if (r)
+		range = amdgpu_hmm_range_alloc(NULL);
+		if (unlikely(!range))
+			return -ENOMEM;
+
+		r = amdgpu_ttm_tt_get_user_pages(bo, range);
+		if (r) {
+			amdgpu_hmm_range_free(range);
 			goto release_object;
-
+		}
 		r = amdgpu_bo_reserve(bo, true);
 		if (r)
 			goto user_pages_done;
 
+		amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
@@ -482,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 user_pages_done:
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+		amdgpu_hmm_range_free(range);
 release_object:
 	drm_gem_object_put(gobj);
 
@@ -636,18 +754,23 @@ out:
  *
  * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
+ *
+ * Returns resulting fence if freed BO(s) got cleared from the PT.
+ * otherwise stub fence in case of error.
  */
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm,
-				    struct amdgpu_bo_va *bo_va,
-				    uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+			struct amdgpu_vm *vm,
+			struct amdgpu_bo_va *bo_va,
+			uint32_t operation)
 {
+	struct dma_fence *fence = dma_fence_get_stub();
 	int r;
 
 	if (!amdgpu_vm_ready(vm))
-		return;
+		return fence;
 
-	r = amdgpu_vm_clear_freed(adev, vm, NULL);
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 	if (r)
 		goto error;
 
@@ -663,36 +786,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
-}
 
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
-		pte_flag |= AMDGPU_PTE_EXECUTABLE;
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-	if (flags & AMDGPU_VM_PAGE_NOALLOC)
-		pte_flag |= AMDGPU_PTE_NOALLOC;
-
-	if (adev->gmc.gmc_funcs->map_mtype)
-		pte_flag |= amdgpu_gmc_map_mtype(adev,
-						 flags & AMDGPU_VM_MTYPE_MASK);
-
-	return pte_flag;
+	return fence;
 }
 
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -711,8 +806,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
+	struct drm_syncobj *timeline_syncobj = NULL;
+	struct dma_fence_chain *timeline_chain = NULL;
+	struct dma_fence *fence;
 	struct drm_exec exec;
-	uint64_t va_flags;
 	uint64_t vm_size;
 	int r = 0;
 
@@ -772,6 +869,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		abo = NULL;
 	}
 
+	r = amdgpu_gem_add_input_fence(filp,
+				       args->input_fence_syncobj_handles,
+				       args->num_syncobj_handles);
+	if (r)
+		goto error_put_gobj;
+
 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
@@ -800,12 +903,19 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		bo_va = NULL;
 	}
 
+	r = amdgpu_gem_update_timeline_node(filp,
+					    args->vm_timeline_syncobj_out,
+					    args->vm_timeline_point,
+					    &timeline_syncobj,
+					    &timeline_chain);
+	if (r)
+		goto error;
+
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
-				     va_flags);
+				     args->flags);
 		break;
 	case AMDGPU_VA_OP_UNMAP:
 		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -817,20 +927,31 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 				       args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
-					     va_flags);
+					     args->flags);
 		break;
 	default:
 		break;
 	}
-	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
-		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
-					args->operation);
+	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+		fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+						args->operation);
+
+		if (timeline_syncobj)
+			amdgpu_gem_update_bo_mapping(filp, bo_va,
+						     args->operation,
+						     args->vm_timeline_point,
+						     fence, timeline_syncobj,
+						     timeline_chain);
+		else
+			dma_fence_put(fence);
+
+	}
 
 error:
 	drm_exec_fini(&exec);
+error_put_gobj:
 	drm_gem_object_put(gobj);
 	return r;
 }
@@ -838,22 +959,38 @@ error:
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_op *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_vm_bo_base *base;
 	struct amdgpu_bo *robj;
+	struct drm_exec exec;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	int r;
 
+	if (args->padding)
+		return -EINVAL;
+
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (!gobj)
 		return -ENOENT;
 
 	robj = gem_to_amdgpu_bo(gobj);
 
-	r = amdgpu_bo_reserve(robj, false);
-	if (unlikely(r))
-		goto out;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec, gobj);
+		drm_exec_retry_on_contention(&exec);
+		if (r)
+			goto out_exec;
+
+		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+			drm_exec_retry_on_contention(&exec);
+			if (r)
+				goto out_exec;
+		}
+	}
 
 	switch (args->op) {
 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -864,29 +1001,26 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
 	}
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
-		if (robj->tbo.base.import_attach &&
+		if (drm_gem_is_imported(&robj->tbo.base) &&
 		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
 			r = -EINVAL;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		for (base = robj->vm_bo; base; base = base->next)
 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 				r = -EINVAL;
-				amdgpu_bo_unreserve(robj);
-				goto out;
+				goto out_exec;
 			}
 
@@ -898,18 +1032,147 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-			amdgpu_vm_bo_invalidate(adev, robj, true);
+			amdgpu_vm_bo_invalidate(robj, true);
+		drm_exec_fini(&exec);
+		break;
+	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+		struct drm_amdgpu_gem_vm_entry *vm_entries;
+		struct amdgpu_bo_va_mapping *mapping;
+		int num_mappings = 0;
+		/*
+		 * num_entries is set as an input to the size of the user-allocated array of
+		 * drm_amdgpu_gem_vm_entry stored at args->value.
+		 * num_entries is sent back as output as the number of mappings the bo has.
+		 * If that number is larger than the size of the array, the ioctl must
+		 * be retried.
+		 */
+		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+		if (!vm_entries)
+			return -ENOMEM;
+
+		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
+
+		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
 
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
+
+		if (num_mappings > 0 && num_mappings <= args->num_entries)
+			if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+				r = -EFAULT;
+
+		args->num_entries = num_mappings;
+
+		kvfree(vm_entries);
 		break;
+	}
 	default:
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		r = -EINVAL;
 	}
 
-out:
 	drm_gem_object_put(gobj);
 	return r;
+
+out_exec:
+	drm_exec_fini(&exec);
+	drm_gem_object_put(gobj);
+	return r;
+}
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * num_entries is set as an input to the size of the entries array.
+ * num_entries is sent back as output as the number of bos in the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *filp)
+{
+	struct drm_amdgpu_gem_list_handles *args = data;
+	struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+	struct drm_gem_object *gobj;
+	int id, ret = 0;
+	int bo_index = 0;
+	int num_bos = 0;
+
+	spin_lock(&filp->table_lock);
+	idr_for_each_entry(&filp->object_idr, gobj, id)
+		num_bos += 1;
+	spin_unlock(&filp->table_lock);
+
+	if (args->num_entries < num_bos) {
+		args->num_entries = num_bos;
+		return 0;
+	}
+
+	if (num_bos == 0) {
+		args->num_entries = 0;
+		return 0;
+	}
+
+	bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+	if (!bo_entries)
+		return -ENOMEM;
+
+	spin_lock(&filp->table_lock);
+	idr_for_each_entry(&filp->object_idr, gobj, id) {
+		struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+		struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+		if (bo_index >= num_bos) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		bo_entry = &bo_entries[bo_index];
+
+		bo_entry->size = amdgpu_bo_size(bo);
+		bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+		bo_entry->preferred_domains = bo->preferred_domains;
+		bo_entry->gem_handle = id;
+		bo_entry->alignment = bo->tbo.page_alignment;
+
+		if (bo->tbo.base.import_attach)
+			bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+		bo_index += 1;
+	}
+	spin_unlock(&filp->table_lock);
+
+	args->num_entries = bo_index;
+
+	if (!ret)
+		if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+			ret = -EFAULT;
+
+	kvfree(bo_entries);
+
+	return ret;
 }
 
 static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
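The VA-ioctl changes above thread explicit synchronization through the existing DRM syncobj machinery: input syncobj handles are waited on before the page-table update, and the resulting update fence can be published at a timeline point on an output syncobj. Below is a minimal userspace sketch of that flow, assuming the new drm_amdgpu_gem_va fields (input_fence_syncobj_handles, num_syncobj_handles, vm_timeline_syncobj_out, vm_timeline_point) inferred from the args-> accesses in this patch; the uapi header change itself is not part of this diff, and error handling is trimmed for brevity.

/* Hypothetical sketch: map a BO with DRM_IOCTL_AMDGPU_GEM_VA and collect the
 * page-table update fence at point 1 of a timeline syncobj.  The new field
 * names are assumptions taken from the kernel-side args-> usage above. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xf86drm.h>		/* drmSyncobjCreate, drmSyncobjTimelineWait */
#include <amdgpu_drm.h>		/* via the libdrm include path */

static int map_with_timeline(int drm_fd, uint32_t bo_handle,
			     uint64_t gpu_va, uint64_t size)
{
	struct drm_amdgpu_gem_va va;
	uint32_t timeline;
	uint64_t point = 1;
	int ret;

	ret = drmSyncobjCreate(drm_fd, 0, &timeline);
	if (ret)
		return ret;

	memset(&va, 0, sizeof(va));
	va.handle = bo_handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = gpu_va;
	va.offset_in_bo = 0;
	va.map_size = size;

	/* New in this patch (assumed field names): no input fences here,
	 * but ask for the VM update fence at point 1 of the timeline. */
	va.input_fence_syncobj_handles = 0;
	va.num_syncobj_handles = 0;
	va.vm_timeline_syncobj_out = timeline;
	va.vm_timeline_point = point;

	ret = ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
	if (ret)
		return -errno;

	/* Block until the page-table update for this mapping has signaled. */
	return drmSyncobjTimelineWait(drm_fd, &timeline, &point, 1,
				      INT64_MAX, 0, NULL);
}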
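The new AMDGPU_GEM_OP_GET_MAPPING_INFO operation uses the query/retry protocol described in the in-code comment above: userspace passes the capacity of its array in num_entries, the kernel writes back the real mapping count, and the caller retries with a larger buffer when the returned count exceeds what it supplied. The sketch below illustrates that loop; the drm_amdgpu_gem_op field names (handle, op, value, num_entries, padding) are taken from the kernel-side usage in this patch, but the exact uapi layout lives in amdgpu_drm.h, which is outside this diff, so treat this as an assumption-laden illustration rather than a verbatim recipe.

/* Hypothetical sketch: query the VA mappings of a BO, retrying when the
 * user array was too small.  Field names are assumed from the kernel code. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* assumed to carry the updated structs */

static int query_bo_mappings(int drm_fd, uint32_t gem_handle)
{
	struct drm_amdgpu_gem_vm_entry *entries;
	struct drm_amdgpu_gem_op op;
	uint32_t count = 8;	/* initial guess for the array size */
	int ret;

	for (;;) {
		entries = calloc(count, sizeof(*entries));
		if (!entries)
			return -ENOMEM;

		memset(&op, 0, sizeof(op));
		op.handle = gem_handle;
		op.op = AMDGPU_GEM_OP_GET_MAPPING_INFO;
		op.value = (uintptr_t)entries;	/* user pointer to the array */
		op.num_entries = count;		/* in: capacity, out: mapping count */

		ret = ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);
		if (ret) {
			free(entries);
			return -errno;
		}

		if (op.num_entries <= count)
			break;		/* entries[0..op.num_entries-1] are valid */

		/* kernel reported more mappings than we allocated: grow and retry */
		count = op.num_entries;
		free(entries);
	}

	/* ... consume entries[0..op.num_entries-1] here ... */
	free(entries);
	return 0;
}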
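amdgpu_gem_list_handles_ioctl follows the same in/out num_entries convention, but per DRM file rather than per BO. A minimal sketch of the corresponding userspace loop follows; the DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES macro name and the entries/num_entries field layout are assumptions based on the struct names used in this patch, since the ioctl number and uapi header changes are outside this file.

/* Hypothetical sketch: enumerate all GEM handles of this DRM file,
 * retrying when the kernel reports more BOs than the array can hold.
 * The ioctl macro name is an assumption; see the uapi header. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>

static int list_gem_handles(int drm_fd)
{
	struct drm_amdgpu_gem_list_handles args;
	struct drm_amdgpu_gem_list_handles_entry *entries;
	uint32_t count = 64;
	int ret;

	for (;;) {
		entries = calloc(count, sizeof(*entries));
		if (!entries)
			return -ENOMEM;

		memset(&args, 0, sizeof(args));
		args.entries = (uintptr_t)entries;	/* user pointer to the array */
		args.num_entries = count;		/* in: capacity, out: BO count */

		ret = ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args);
		if (ret) {
			free(entries);
			return -errno;
		}

		if (args.num_entries <= count)
			break;	/* entries[0..args.num_entries-1] are valid */

		count = args.num_entries;	/* too small: grow and retry */
		free(entries);
	}

	/* ... inspect entries[i].gem_handle, .size, .preferred_domains ... */
	free(entries);
	return 0;
}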
