Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 187 |
1 file changed, 99 insertions, 88 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3f07b1a2ce47..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -62,9 +62,8 @@ int amdgpu_pasid_alloc(unsigned int bits)
 	int pasid = -EINVAL;
 
 	for (bits = min(bits, 31U); bits > 0; bits--) {
-		pasid = ida_simple_get(&amdgpu_pasid_ida,
-				       1U << (bits - 1), 1U << bits,
-				       GFP_KERNEL);
+		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
+					(1U << bits) - 1, GFP_KERNEL);
 		if (pasid != -ENOSPC)
 			break;
 	}
@@ -82,7 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
 void amdgpu_pasid_free(u32 pasid)
 {
 	trace_amdgpu_pasid_freed(pasid);
-	ida_simple_remove(&amdgpu_pasid_ida, pasid);
+	ida_free(&amdgpu_pasid_ida, pasid);
 }
 
 static void amdgpu_pasid_free_cb(struct dma_fence *fence,
@@ -188,7 +187,6 @@ static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
 /**
  * amdgpu_vmid_grab_idle - grab idle VMID
  *
- * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @idle: resulting idle VMID
  * @fence: fence to wait for if no id could be grabbed
@@ -196,66 +194,41 @@ static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
-static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
-				 struct amdgpu_ring *ring,
+static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
 				 struct amdgpu_vmid **idle,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence **fences;
-	unsigned i;
 
+	/* If anybody is waiting for a VMID let everybody wait for fairness */
 	if (!dma_fence_is_signaled(ring->vmid_wait)) {
 		*fence = dma_fence_get(ring->vmid_wait);
 		return 0;
 	}
 
-	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
-	if (!fences)
-		return -ENOMEM;
-
 	/* Check if we have an idle VMID */
-	i = 0;
-	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+	list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
 		/* Don't use per engine and per process VMID at the same time */
 		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
 			NULL : ring;
 
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
-		if (!fences[i])
-			break;
-		++i;
+		*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+		if (!(*fence))
+			return 0;
 	}
 
-	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&(*idle)->list == &id_mgr->ids_lru) {
-		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct dma_fence_array *array;
-		unsigned j;
-
-		*idle = NULL;
-		for (j = 0; j < i; ++j)
-			dma_fence_get(fences[j]);
-
-		array = dma_fence_array_create(i, fences, fence_context,
-					       seqno, true);
-		if (!array) {
-			for (j = 0; j < i; ++j)
-				dma_fence_put(fences[j]);
-			kfree(fences);
-			return -ENOMEM;
-		}
-
-		*fence = dma_fence_get(&array->base);
-		dma_fence_put(ring->vmid_wait);
-		ring->vmid_wait = &array->base;
-		return 0;
-	}
-	kfree(fences);
+	/*
+	 * If we can't find a idle VMID to use, wait on a fence from the least
+	 * recently used in the hope that it will be available soon.
+	 */
+	*idle = NULL;
+	dma_fence_put(ring->vmid_wait);
+	ring->vmid_wait = dma_fence_get(*fence);
+	/* This is the reference we return */
+	dma_fence_get(*fence);
 
 	return 0;
 }
@@ -277,23 +250,28 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
-	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	unsigned vmhub = ring->vm_hub;
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
 	int r;
 
-	*id = id_mgr->reserved;
+	*id = vm->reserved_vmid[vmhub];
 	if ((*id)->owner != vm->immediate.fence_context ||
 	    !amdgpu_vmid_compatible(*id, job) ||
 	    (*id)->flushed_updates < updates ||
 	    !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
-	     !dma_fence_is_signaled((*id)->last_flush))) {
+	     !dma_fence_is_signaled((*id)->last_flush)))
+		needs_flush = true;
+
+	if ((*id)->owner != vm->immediate.fence_context ||
+	    (!adev->vm_manager.concurrent_flush && needs_flush)) {
 		struct dma_fence *tmp;
 
-		/* Don't use per engine and per process VMID at the same time */
+		/* Don't use per engine and per process VMID at the
+		 * same time
+		 */
 		if (adev->vm_manager.concurrent_flush)
 			ring = NULL;
 
@@ -305,13 +283,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 			*fence = dma_fence_get(tmp);
 			return 0;
 		}
-		needs_flush = true;
 	}
 
 	/* Good we can use this VMID. Remember this submission as
 	 * user of the VMID.
 	 */
-	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
+	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+			      GFP_ATOMIC);
 	if (r)
 		return r;
 
@@ -327,18 +305,16 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  * @ring: ring we want to submit job to
  * @job: job who wants to use the VMID
  * @id: resulting VMID
- * @fence: fence to wait for if no id could be grabbed
  *
  * Try to reuse a VMID for this submission.
  */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
 				 struct amdgpu_job *job,
-				 struct amdgpu_vmid **id,
-				 struct dma_fence **fence)
+				 struct amdgpu_vmid **id)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
@@ -372,7 +348,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 	 * user of the VMID.
 	 */
 	r = amdgpu_sync_fence(&(*id)->active,
-			      &job->base.s_fence->finished);
+			      &job->base.s_fence->finished,
+			      GFP_ATOMIC);
 	if (r)
 		return r;
 
@@ -398,23 +375,23 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		     struct amdgpu_job *job, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned vmhub = ring->funcs->vmhub;
+	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *idle = NULL;
 	struct amdgpu_vmid *id = NULL;
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
+	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
 	if (r || !idle)
 		goto error;
 
-	if (vm->reserved_vmid[vmhub]) {
+	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
 	} else {
-		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
+		r = amdgpu_vmid_grab_used(vm, ring, job, &id);
 		if (r)
 			goto error;
 
@@ -424,7 +401,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 		/* Remember this submission as user of the VMID */
 		r = amdgpu_sync_fence(&id->active,
-				      &job->base.s_fence->finished);
+				      &job->base.s_fence->finished,
+				      GFP_ATOMIC);
 		if (r)
 			goto error;
 
@@ -459,47 +437,73 @@ error:
 	return r;
 }
 
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+	return vm->reserved_vmid[vmhub];
+}
+
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vmid *id;
+	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
 	if (vm->reserved_vmid[vmhub])
 		goto unlock;
-
-	++id_mgr->reserved_use_count;
-	if (!id_mgr->reserved) {
-		struct amdgpu_vmid *id;
-
-		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
-				      list);
-		/* Remove from normal round robin handling */
-		list_del_init(&id->list);
-		id_mgr->reserved = id;
+	if (id_mgr->reserved_vmid) {
+		r = -ENOENT;
+		goto unlock;
 	}
-	vm->reserved_vmid[vmhub] = true;
 
+	/* Remove from normal round robin handling */
+	id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+	list_del_init(&id->list);
+	vm->reserved_vmid[vmhub] = id;
+	id_mgr->reserved_vmid = true;
+	mutex_unlock(&id_mgr->lock);
+	return 0;
 unlock:
 	mutex_unlock(&id_mgr->lock);
-	return 0;
+	return r;
 }
 
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
+/*
 * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub] &&
-	    !--id_mgr->reserved_use_count) {
-		/* give the reserved ID back to normal round robin */
-		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
-		id_mgr->reserved = NULL;
+	if (vm->reserved_vmid[vmhub]) {
+		list_add(&vm->reserved_vmid[vmhub]->list,
+			 &id_mgr->ids_lru);
+		vm->reserved_vmid[vmhub] = NULL;
+		id_mgr->reserved_vmid = false;
 	}
-	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
 
@@ -566,10 +570,17 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
 		mutex_init(&id_mgr->lock);
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
-		id_mgr->reserved_use_count = 0;
 
-		/* manage only VMIDs not used by KFD */
-		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+		/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+		if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+			/* manage only VMIDs not used by KFD */
+			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+		else if (AMDGPU_IS_MMHUB0(i) ||
+			 AMDGPU_IS_MMHUB1(i))
+			id_mgr->num_ids = 16;
+		else
+			/* manage only VMIDs not used by KFD */
+			id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
 
 		/* skip over VMID 0, since it is the system VM */
 		for (j = 1; j < id_mgr->num_ids; ++j) {
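
Note on the PASID allocation hunk: ida_simple_get() takes an exclusive upper bound, while ida_alloc_range() takes an inclusive maximum, which is why the replacement passes (1U << bits) - 1 to keep the same range. A minimal sketch of the equivalence follows (illustration only, not part of the patch; the surrounding kernel context from the file above is assumed):

	/* Illustration only: ida_simple_get(ida, start, end, gfp) allocates
	 * from the half-open range [start, end), while ida_alloc_range(ida,
	 * min, max, gfp) uses the inclusive range [min, max].  Both calls
	 * below therefore request the same PASIDs, e.g. 0x8000..0xffff when
	 * bits == 16.
	 */
	pasid = ida_simple_get(&amdgpu_pasid_ida, 1U << (bits - 1),
			       1U << bits, GFP_KERNEL);		/* old helper */
	pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
				(1U << bits) - 1, GFP_KERNEL);	/* replacement */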
