From 02d170e264f1a982aa7ecb5d77ffcbb72f1060fd Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Fri, 5 Jan 2018 07:06:46 +0800 Subject: drm/amdgpu: fix semicolon.cocci warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c:281:2-3: Unneeded semicolon Remove unneeded semicolon. Generated by: scripts/coccinelle/misc/semicolon.cocci Fixes: 620f774f4687 ("drm/amdgpu: separate VMID and PASID handling") CC: Christian König Signed-off-by: Fengguang Wu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 16884a0b677b..5248a3232aff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -278,7 +278,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, else goto no_flush_needed; - }; + } /* Still no ID to use? Then use the idle one found earlier */ id = idle; -- cgit From 4b5f75504975b42377fa198b0672c9a52eb728e0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 5 Jan 2018 11:16:22 +0100 Subject: drm/amdgpu: add amdgpu_pasid_free_delayed v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Free up a pasid after all fences signaled. v2: also handle the case when we can't allocate a fence array. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 82 +++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 5248a3232aff..842caa5ed73b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -40,6 +40,12 @@ */ static DEFINE_IDA(amdgpu_pasid_ida); +/* Helper to free pasid from a fence callback */ +struct amdgpu_pasid_cb { + struct dma_fence_cb cb; + unsigned int pasid; +}; + /** * amdgpu_pasid_alloc - Allocate a PASID * @bits: Maximum width of the PASID in bits, must be at least 1 @@ -75,6 +81,82 @@ void amdgpu_pasid_free(unsigned int pasid) ida_simple_remove(&amdgpu_pasid_ida, pasid); } +static void amdgpu_pasid_free_cb(struct dma_fence *fence, + struct dma_fence_cb *_cb) +{ + struct amdgpu_pasid_cb *cb = + container_of(_cb, struct amdgpu_pasid_cb, cb); + + amdgpu_pasid_free(cb->pasid); + dma_fence_put(fence); + kfree(cb); +} + +/** + * amdgpu_pasid_free_delayed - free pasid when fences signal + * + * @resv: reservation object with the fences to wait for + * @pasid: pasid to free + * + * Free the pasid only after all the fences in resv are signaled. 
+ */ +void amdgpu_pasid_free_delayed(struct reservation_object *resv, + unsigned int pasid) +{ + struct dma_fence *fence, **fences; + struct amdgpu_pasid_cb *cb; + unsigned count; + int r; + + r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); + if (r) + goto fallback; + + if (count == 0) { + amdgpu_pasid_free(pasid); + return; + } + + if (count == 1) { + fence = fences[0]; + kfree(fences); + } else { + uint64_t context = dma_fence_context_alloc(1); + struct dma_fence_array *array; + + array = dma_fence_array_create(count, fences, context, + 1, false); + if (!array) { + kfree(fences); + goto fallback; + } + fence = &array->base; + } + + cb = kmalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) { + /* Last resort when we are OOM */ + dma_fence_wait(fence, false); + dma_fence_put(fence); + amdgpu_pasid_free(pasid); + } else { + cb->pasid = pasid; + if (dma_fence_add_callback(fence, &cb->cb, + amdgpu_pasid_free_cb)) + amdgpu_pasid_free_cb(fence, &cb->cb); + } + + return; + +fallback: + /* Not enough memory for the delayed delete, as last resort + * block for all the fences to complete. + */ + reservation_object_wait_timeout_rcu(resv, true, false, + MAX_SCHEDULE_TIMEOUT); + amdgpu_pasid_free(pasid); +} + /* * VMID manager * -- cgit From c35ff18823f877459b2b4ace61fb08a9b56106f9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 9 Jan 2018 19:32:58 +0100 Subject: drm/amdgpu: trace allocated PASIDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Trace all allocated PASIDs. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 842caa5ed73b..3b9d318cf166 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -69,6 +69,9 @@ int amdgpu_pasid_alloc(unsigned int bits) break; } + if (pasid >= 0) + trace_amdgpu_pasid_allocated(pasid); + return pasid; } @@ -78,6 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits) */ void amdgpu_pasid_free(unsigned int pasid) { + trace_amdgpu_pasid_freed(pasid); ida_simple_remove(&amdgpu_pasid_ida, pasid); } -- cgit From 5a4633c4b880cf8d1fe7df9c55766205cf9bc295 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 8 Jan 2018 14:48:11 +0100 Subject: drm/amdgpu: forward pasid to backend flush implementations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Forward the pasid from the VM code to the emit_vm_flush function and update all implementations with the new parameter.
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3b9d318cf166..c13cf7e79b2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -237,6 +237,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, id->last_flush = NULL; } job->vmid = id - id_mgr->ids; + job->pasid = vm->pasid; trace_amdgpu_vm_grab_id(vm, ring, job); out: return r; @@ -388,6 +389,7 @@ no_flush_needed: list_move_tail(&id->list, &id_mgr->ids_lru); job->vmid = id - id_mgr->ids; + job->pasid = vm->pasid; trace_amdgpu_vm_grab_id(vm, ring, job); error: -- cgit From 8fe27f8fa08b30c2c515b06209c9de52b392a116 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 10:16:26 +0100 Subject: drm/amdgpu: make VMID assignment more fair v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar to finding an idle one before reuse. This guarantees fairness between processes. Otherwise process with a reserved VMID have an unfair advantage while scheduling jobs. v2: improve commit message Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index c13cf7e79b2e..7a3d0de7425d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -268,11 +268,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, int r = 0; mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub]) { - r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job); - mutex_unlock(&id_mgr->lock); - return r; - } fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); if (!fences) { mutex_unlock(&id_mgr->lock); @@ -319,6 +314,13 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } kfree(fences); + if (vm->reserved_vmid[vmhub]) { + r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, + fence, job); + mutex_unlock(&id_mgr->lock); + return r; + } + job->vm_needs_flush = vm->use_cpu_for_update; /* Check if we can use a VMID already assigned to this VM */ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { -- cgit From 3a80e92b6eca179aab145ae997df0c424d8851ba Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 11:10:19 +0100 Subject: drm/amdgpu: split finding idle VMID into separate function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change, but makes it easier to maintain the code. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 116 +++++++++++++++++++------------- 1 file changed, 69 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 7a3d0de7425d..fbe958f7cb5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -182,6 +182,72 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, atomic_read(&adev->gpu_reset_counter); } +/** + * amdgpu_vm_grab_idle - grab idle VMID + * + * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies + * @idle: resulting idle VMID + * + * Try to find an idle VMID, if none is idle add a fence to wait to the sync + * object. Returns -ENOMEM when we are out of memory. + */ +static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, + struct amdgpu_ring *ring, + struct amdgpu_sync *sync, + struct amdgpu_vmid **idle) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct dma_fence **fences; + unsigned i; + int r; + + fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); + if (!fences) + return -ENOMEM; + + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry((*idle), &id_mgr->ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring); + if (!fences[i]) + break; + ++i; + } + + /* If we can't find a idle VMID to use, wait till one becomes available */ + if (&(*idle)->list == &id_mgr->ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct dma_fence_array *array; + unsigned j; + + *idle = NULL; + for (j = 0; j < i; ++j) + dma_fence_get(fences[j]); + + array = dma_fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + dma_fence_put(fences[j]); + kfree(fences); + return -ENOMEM; + } + + r = amdgpu_sync_fence(adev, sync, &array->base, false); + dma_fence_put(&array->base); + return r; + + } + kfree(fences); + + return 0; +} + /* idr_mgr->lock must be held */ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, struct amdgpu_ring *ring, @@ -263,56 +329,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, uint64_t fence_context = adev->fence_context + ring->idx; struct dma_fence *updates = sync->last_vm_update; struct amdgpu_vmid *id, *idle; - struct dma_fence **fences; - unsigned i; int r = 0; mutex_lock(&id_mgr->lock); - fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); - if (!fences) { - mutex_unlock(&id_mgr->lock); - return -ENOMEM; - } - /* Check if we have an idle VMID */ - i = 0; - list_for_each_entry(idle, &id_mgr->ids_lru, list) { - fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); - if (!fences[i]) - break; - ++i; - } - - /* If we can't find a idle VMID to use, wait till one becomes available */ - if (&idle->list == &id_mgr->ids_lru) { - u64 fence_context = adev->vm_manager.fence_context + ring->idx; - unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; - struct dma_fence_array *array; - unsigned j; - - for (j = 0; j < i; ++j) - dma_fence_get(fences[j]); - - array = dma_fence_array_create(i, fences, fence_context, - seqno, true); - if (!array) { - for (j = 0; j < i; 
++j) - dma_fence_put(fences[j]); - kfree(fences); - r = -ENOMEM; - goto error; - } - - - r = amdgpu_sync_fence(ring->adev, sync, &array->base, false); - dma_fence_put(&array->base); - if (r) - goto error; - - mutex_unlock(&id_mgr->lock); - return 0; - - } - kfree(fences); + r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle); + if (r || !idle) + goto error; if (vm->reserved_vmid[vmhub]) { r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, -- cgit From 102374488dd608a9dd3fda962b9bb725848f8c3b Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 11:17:56 +0100 Subject: drm/amdgpu: make VMID owner none atomic v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variable is protected by the VMID mutex anyway. v2: grab the mutex while resetting the VMID as well Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index fbe958f7cb5b..ac31740d1cd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -267,7 +267,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, flushed = id->flushed_updates; if ((amdgpu_vmid_had_gpu_reset(adev, id)) || - (atomic64_read(&id->owner) != vm->entity.fence_context) || + (id->owner != vm->entity.fence_context) || (job->vm_pd_addr != id->pd_gpu_addr) || (updates && (!flushed || updates->context != flushed->context || dma_fence_is_later(updates, flushed))) || @@ -296,7 +296,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, id->flushed_updates = dma_fence_get(updates); } id->pd_gpu_addr = job->vm_pd_addr; - atomic64_set(&id->owner, vm->entity.fence_context); + id->owner = vm->entity.fence_context; job->vm_needs_flush = needs_flush; if (needs_flush) { dma_fence_put(id->last_flush); @@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (amdgpu_vmid_had_gpu_reset(adev, id)) continue; - if (atomic64_read(&id->owner) != vm->entity.fence_context) + if (id->owner != vm->entity.fence_context) continue; if (job->vm_pd_addr != id->pd_gpu_addr) @@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id->pd_gpu_addr = job->vm_pd_addr; dma_fence_put(id->flushed_updates); id->flushed_updates = dma_fence_get(updates); - atomic64_set(&id->owner, vm->entity.fence_context); + id->owner = vm->entity.fence_context; needs_flush: job->vm_needs_flush = true; @@ -482,13 +482,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *id = &id_mgr->ids[vmid]; - atomic64_set(&id->owner, 0); + mutex_lock(&id_mgr->lock); + id->owner = 0; id->gds_base = 0; id->gds_size = 0; id->gws_base = 0; id->gws_size = 0; id->oa_base = 0; id->oa_size = 0; + mutex_unlock(&id_mgr->lock); } /** -- cgit From 782dcfdfd9c1c30e0b8304915297cef665a9867c Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 11:21:23 +0100 Subject: drm/amdgpu: stop checking GPU reset counter during VMID grab MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We do this later on when we flush the VMID anyway. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index ac31740d1cd3..5761a659baf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -266,8 +266,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, bool needs_flush = vm->use_cpu_for_update; flushed = id->flushed_updates; - if ((amdgpu_vmid_had_gpu_reset(adev, id)) || - (id->owner != vm->entity.fence_context) || + if ((id->owner != vm->entity.fence_context) || (job->vm_pd_addr != id->pd_gpu_addr) || (updates && (!flushed || updates->context != flushed->context || dma_fence_is_later(updates, flushed))) || @@ -350,9 +349,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, bool needs_flush = vm->use_cpu_for_update; /* Check all the prerequisites to using this VMID */ - if (amdgpu_vmid_had_gpu_reset(adev, id)) - continue; - if (id->owner != vm->entity.fence_context) continue; -- cgit From cb5372ace9a3eaf9c0f8cd950bf68c2f3ce1c511 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 11:56:53 +0100 Subject: drm/amdgpu: cleanup and simplify amdgpu_vmid_grab_reserved MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the "_locked" from the name, cleanup and simplify the logic a bit. Add missing comments. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 ++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 5761a659baf2..51633fc1598a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -248,12 +248,22 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, return 0; } -/* idr_mgr->lock must be held */ -static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, - struct amdgpu_ring *ring, - struct amdgpu_sync *sync, - struct dma_fence *fence, - struct amdgpu_job *job) +/** + * amdgpu_vm_grab_reserved - try to assign reserved VMID + * + * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies + * @fence: fence protecting ID from reuse + * @job: job who wants to use the VMID + * + * Try to assign a reserved VMID. 
+ */ +static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, + struct amdgpu_ring *ring, + struct amdgpu_sync *sync, + struct dma_fence *fence, + struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -261,18 +271,21 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, struct amdgpu_vmid *id = vm->reserved_vmid[vmhub]; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct dma_fence *updates = sync->last_vm_update; - int r = 0; - struct dma_fence *flushed, *tmp; bool needs_flush = vm->use_cpu_for_update; + int r = 0; + + if (updates && id->flushed_updates && + updates->context == id->flushed_updates->context && + !dma_fence_is_later(updates, id->flushed_updates)) + updates = NULL; + + if (id->owner != vm->entity.fence_context || + job->vm_pd_addr != id->pd_gpu_addr || + updates || !id->last_flush || + (id->last_flush->context != fence_context && + !dma_fence_is_signaled(id->last_flush))) { + struct dma_fence *tmp; - flushed = id->flushed_updates; - if ((id->owner != vm->entity.fence_context) || - (job->vm_pd_addr != id->pd_gpu_addr) || - (updates && (!flushed || updates->context != flushed->context || - dma_fence_is_later(updates, flushed))) || - (!id->last_flush || (id->last_flush->context != fence_context && - !dma_fence_is_signaled(id->last_flush)))) { - needs_flush = true; /* to prevent one context starved by another context */ id->pd_gpu_addr = 0; tmp = amdgpu_sync_peek_fence(&id->active, ring); @@ -280,6 +293,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, r = amdgpu_sync_fence(adev, sync, tmp, false); return r; } + needs_flush = true; } /* Good we can use this VMID. Remember this submission as @@ -287,10 +301,9 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, */ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); if (r) - goto out; + return r; - if (updates && (!flushed || updates->context != flushed->context || - dma_fence_is_later(updates, flushed))) { + if (updates) { dma_fence_put(id->flushed_updates); id->flushed_updates = dma_fence_get(updates); } @@ -304,8 +317,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, job->vmid = id - id_mgr->ids; job->pasid = vm->pasid; trace_amdgpu_vm_grab_id(vm, ring, job); -out: - return r; + return 0; } /** @@ -315,6 +327,7 @@ out: * @ring: ring we want to submit job to * @sync: sync object where we add dependencies * @fence: fence protecting ID from reuse + * @job: job who wants to use the VMID * * Allocate an id for the vm, adding fences to the sync obj as necessary. */ @@ -336,8 +349,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, goto error; if (vm->reserved_vmid[vmhub]) { - r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, - fence, job); + r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job); mutex_unlock(&id_mgr->lock); return r; } -- cgit From 25ddf75bb3aa5e1f47eb9c04f0d50bf37269702b Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 13:35:25 +0100 Subject: drm/amdgpu: move reusing VMIDs into separate function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's try this once more. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 130 ++++++++++++++++++++------------ 1 file changed, 81 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 51633fc1598a..0a9789604c77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -321,58 +321,51 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_id - allocate the next free VMID + * amdgpu_vm_grab_used - try to reuse a VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to * @sync: sync object where we add dependencies * @fence: fence protecting ID from reuse * @job: job who wants to use the VMID + * @id: resulting VMID * - * Allocate an id for the vm, adding fences to the sync obj as necessary. + * Try to reuse a VMID for this submission. */ -int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job) +static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, + struct amdgpu_ring *ring, + struct amdgpu_sync *sync, + struct dma_fence *fence, + struct amdgpu_job *job, + struct amdgpu_vmid **id) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; uint64_t fence_context = adev->fence_context + ring->idx; struct dma_fence *updates = sync->last_vm_update; - struct amdgpu_vmid *id, *idle; - int r = 0; - - mutex_lock(&id_mgr->lock); - r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle); - if (r || !idle) - goto error; - - if (vm->reserved_vmid[vmhub]) { - r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job); - mutex_unlock(&id_mgr->lock); - return r; - } + int r; job->vm_needs_flush = vm->use_cpu_for_update; + /* Check if we can use a VMID already assigned to this VM */ - list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { - struct dma_fence *flushed; + list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) { bool needs_flush = vm->use_cpu_for_update; + struct dma_fence *flushed; /* Check all the prerequisites to using this VMID */ - if (id->owner != vm->entity.fence_context) + if ((*id)->owner != vm->entity.fence_context) continue; - if (job->vm_pd_addr != id->pd_gpu_addr) + if ((*id)->pd_gpu_addr != job->vm_pd_addr) continue; - if (!id->last_flush || - (id->last_flush->context != fence_context && - !dma_fence_is_signaled(id->last_flush))) + if (!(*id)->last_flush || + ((*id)->last_flush->context != fence_context && + !dma_fence_is_signaled((*id)->last_flush))) needs_flush = true; - flushed = id->flushed_updates; + flushed = (*id)->flushed_updates; if (updates && (!flushed || dma_fence_is_later(updates, flushed))) needs_flush = true; @@ -380,44 +373,83 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (adev->asic_type < CHIP_VEGA10 && needs_flush) continue; - /* Good we can use this VMID. Remember this submission as + /* Good, we can use this VMID. Remember this submission as * user of the VMID. 
*/ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); if (r) - goto error; + return r; if (updates && (!flushed || dma_fence_is_later(updates, flushed))) { - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); + dma_fence_put((*id)->flushed_updates); + (*id)->flushed_updates = dma_fence_get(updates); } - if (needs_flush) - goto needs_flush; - else - goto no_flush_needed; - + job->vm_needs_flush |= needs_flush; + return 0; } - /* Still no ID to use? Then use the idle one found earlier */ - id = idle; + *id = NULL; + return 0; +} - /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); +/** + * amdgpu_vm_grab_id - allocate the next free VMID + * + * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * @sync: sync object where we add dependencies + * @fence: fence protecting ID from reuse + * @job: job who wants to use the VMID + * + * Allocate an id for the vm, adding fences to the sync obj as necessary. + */ +int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, + struct amdgpu_sync *sync, struct dma_fence *fence, + struct amdgpu_job *job) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct dma_fence *updates = sync->last_vm_update; + struct amdgpu_vmid *id, *idle; + int r = 0; + + mutex_lock(&id_mgr->lock); + r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle); + if (r || !idle) + goto error; + + if (vm->reserved_vmid[vmhub]) { + r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job); + mutex_unlock(&id_mgr->lock); + return r; + } + + r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id); if (r) goto error; - id->pd_gpu_addr = job->vm_pd_addr; - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); - id->owner = vm->entity.fence_context; + if (!id) { + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; -needs_flush: - job->vm_needs_flush = true; - dma_fence_put(id->last_flush); - id->last_flush = NULL; + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + if (r) + goto error; -no_flush_needed: + id->pd_gpu_addr = job->vm_pd_addr; + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + id->owner = vm->entity.fence_context; + job->vm_needs_flush = true; + } + + if (job->vm_needs_flush) { + dma_fence_put(id->last_flush); + id->last_flush = NULL; + } list_move_tail(&id->list, &id_mgr->ids_lru); job->vmid = id - id_mgr->ids; -- cgit From 58592a095c981a002137221205411f538b9f0fb9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 14:24:45 +0100 Subject: drm/amdgpu: restructure amdgpu_vmid_grab MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have the different cases for grabbing a VMID in separate functions, restructure the top level function to only have one place where VMIDs are assigned to jobs. 
Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 89 ++++++++++++++++----------------- 1 file changed, 42 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 0a9789604c77..156e026046b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -263,33 +263,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job) + struct amdgpu_job *job, + struct amdgpu_vmid **id) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; uint64_t fence_context = adev->fence_context + ring->idx; - struct amdgpu_vmid *id = vm->reserved_vmid[vmhub]; - struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct dma_fence *updates = sync->last_vm_update; bool needs_flush = vm->use_cpu_for_update; int r = 0; - if (updates && id->flushed_updates && - updates->context == id->flushed_updates->context && - !dma_fence_is_later(updates, id->flushed_updates)) + *id = vm->reserved_vmid[vmhub]; + if (updates && (*id)->flushed_updates && + updates->context == (*id)->flushed_updates->context && + !dma_fence_is_later(updates, (*id)->flushed_updates)) updates = NULL; - if (id->owner != vm->entity.fence_context || - job->vm_pd_addr != id->pd_gpu_addr || - updates || !id->last_flush || - (id->last_flush->context != fence_context && - !dma_fence_is_signaled(id->last_flush))) { + if ((*id)->owner != vm->entity.fence_context || + job->vm_pd_addr != (*id)->pd_gpu_addr || + updates || !(*id)->last_flush || + ((*id)->last_flush->context != fence_context && + !dma_fence_is_signaled((*id)->last_flush))) { struct dma_fence *tmp; /* to prevent one context starved by another context */ - id->pd_gpu_addr = 0; - tmp = amdgpu_sync_peek_fence(&id->active, ring); + (*id)->pd_gpu_addr = 0; + tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); if (tmp) { + *id = NULL; r = amdgpu_sync_fence(adev, sync, tmp, false); return r; } @@ -299,24 +300,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. 
*/ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); + r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); if (r) return r; if (updates) { - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); + dma_fence_put((*id)->flushed_updates); + (*id)->flushed_updates = dma_fence_get(updates); } - id->pd_gpu_addr = job->vm_pd_addr; - id->owner = vm->entity.fence_context; job->vm_needs_flush = needs_flush; - if (needs_flush) { - dma_fence_put(id->last_flush); - id->last_flush = NULL; - } - job->vmid = id - id_mgr->ids; - job->pasid = vm->pasid; - trace_amdgpu_vm_grab_id(vm, ring, job); return 0; } @@ -411,7 +403,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct dma_fence *updates = sync->last_vm_update; struct amdgpu_vmid *id, *idle; int r = 0; @@ -421,37 +412,41 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, goto error; if (vm->reserved_vmid[vmhub]) { - r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job); - mutex_unlock(&id_mgr->lock); - return r; - } + r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id); + if (r || !id) + goto error; + } else { + r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id); + if (r) + goto error; - r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id); - if (r) - goto error; + if (!id) { + struct dma_fence *updates = sync->last_vm_update; - if (!id) { - /* Still no ID to use? Then use the idle one found earlier */ - id = idle; + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; - /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); - if (r) - goto error; + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, + fence, false); + if (r) + goto error; - id->pd_gpu_addr = job->vm_pd_addr; - dma_fence_put(id->flushed_updates); - id->flushed_updates = dma_fence_get(updates); - id->owner = vm->entity.fence_context; - job->vm_needs_flush = true; + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + job->vm_needs_flush = true; + } + + list_move_tail(&id->list, &id_mgr->ids_lru); } + id->pd_gpu_addr = job->vm_pd_addr; + id->owner = vm->entity.fence_context; + if (job->vm_needs_flush) { dma_fence_put(id->last_flush); id->last_flush = NULL; } - list_move_tail(&id->list, &id_mgr->ids_lru); - job->vmid = id - id_mgr->ids; job->pasid = vm->pasid; trace_amdgpu_vm_grab_id(vm, ring, job); -- cgit From 3af81440a9b02e2c6e244539ee567063c3d62292 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 31 Jan 2018 16:03:19 +0100 Subject: drm/amdgpu: cache the fence to wait for a VMID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Beneficial when a lot of processes are waiting for VMIDs. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 156e026046b5..7d2805729c20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -205,6 +205,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, unsigned i; int r; + if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait)) + return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false); + fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); if (!fences) return -ENOMEM; @@ -239,9 +242,9 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, } r = amdgpu_sync_fence(adev, sync, &array->base, false); - dma_fence_put(&array->base); + dma_fence_put(ring->vmid_wait); + ring->vmid_wait = &array->base; return r; - } kfree(fences); -- cgit From ed024578e703da96c71339a73cf5c3ff4317dd9a Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 9 Feb 2018 12:15:45 -0500 Subject: drm/amdgpu: Fix potential uninitialized use of 'idle' in amdgpu_ids.c v2: Use NULL and reverse christmas tree ordering Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 7d2805729c20..563e74755aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -406,7 +406,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; - struct amdgpu_vmid *id, *idle; + struct amdgpu_vmid *idle = NULL; + struct amdgpu_vmid *id = NULL; int r = 0; mutex_lock(&id_mgr->lock); -- cgit From b3cd285fa68d162a53c2eb4e23bc4fc1ab7d97f6 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 5 Feb 2018 17:38:01 +0100 Subject: drm/amdgpu: update the PASID mapping only on demand MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updating the PASID is rather heavyweight and shouldn't be done all the time. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 563e74755aab..a1c78f90eadf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -607,6 +607,7 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev) amdgpu_sync_free(&id->active); dma_fence_put(id->flushed_updates); dma_fence_put(id->last_flush); + dma_fence_put(id->pasid_mapping); } } } -- cgit
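
The PASID patches above only cover the allocation, tracing and delayed-free helpers inside amdgpu_ids.c; the caller that hands a reservation object to amdgpu_pasid_free_delayed() lives elsewhere in the amdgpu VM code and is not part of this file's history. A minimal sketch of such a caller follows; the function name and the field path used to reach the root page-table reservation object (vm->root.base.bo->tbo.resv) are illustrative assumptions, not taken from these patches.

/* Illustrative sketch only, not part of the patch series above.
 * Assumes an amdgpu_vm whose root page-table BO carries the fences
 * that may still reference the PASID.
 */
static void amdgpu_vm_release_pasid_example(struct amdgpu_vm *vm)
{
	if (!vm->pasid)
		return;

	/* Defer the actual ida_simple_remove() until every fence on the
	 * root reservation object has signaled, so the PASID cannot be
	 * recycled while the hardware may still raise faults tagged with
	 * it. amdgpu_pasid_free_delayed() falls back to a blocking wait
	 * when it cannot allocate the fence array or callback.
	 */
	amdgpu_pasid_free_delayed(vm->root.base.bo->tbo.resv, vm->pasid);
	vm->pasid = 0;
}

Deferring the free until the fences signal keeps the PASID, which the later patches trace and forward to the VM flush backends, valid for work that is still in flight.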