author		Christian König <christian.koenig@amd.com>	2022-09-28 14:00:57 +0200
committer	Christian König <christian.koenig@amd.com>	2022-11-03 12:45:19 +0100
commit		940ca22b7ea9db6857ba7c6adb961b84d8cc28ff (patch)
tree		f03d5ac59c0fc42773ea8495c95fd98fdc63f8aa /drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
parent		c5093cddf56baceb1545028e8a5971d94cf59d25 (diff)
drm/amdgpu: drop amdgpu_sync from amdgpu_vmid_grab v2
Instead return the fence directly. Avoids memory allocation to store the
fence.

v2: cleanup coding style as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-6-christian.koenig@amd.com
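To illustrate the reworked interface, here is a minimal caller-side sketch; it is not part of the patch, and the wrapper name and error handling are hypothetical. Instead of handing amdgpu_vmid_grab() an amdgpu_sync container to collect dependencies, a caller now passes a struct dma_fence ** and gets back either NULL (a VMID was assigned) or a referenced fence it has to wait on before retrying:

	/* Hypothetical sketch of a caller using the new signature. */
	static struct dma_fence *example_vmid_dependency(struct amdgpu_vm *vm,
							 struct amdgpu_ring *ring,
							 struct amdgpu_job *job)
	{
		struct dma_fence *fence = NULL;
		int r;

		/* No amdgpu_sync object any more; the fence comes back directly. */
		r = amdgpu_vmid_grab(vm, ring, job, &fence);
		if (r)
			return ERR_PTR(r);

		/*
		 * NULL means a VMID was assigned. Otherwise the fence carries a
		 * reference (taken with dma_fence_get() inside amdgpu_vmid_grab())
		 * that the submission path must wait on and eventually drop with
		 * dma_fence_put().
		 */
		return fence;
	}

Returning the fence this way is what the commit message refers to: the per-fence entry that amdgpu_sync_fence() had to allocate to store the dependency is no longer needed.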
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c	42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b76294d4275b..2a9a2593dc18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
* @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to find an idle VMID, if none is idle add a fence to wait to the sync
* object. Returns -ENOMEM when we are out of memory.
*/
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct amdgpu_vmid **idle)
+ struct amdgpu_vmid **idle,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence **fences;
unsigned i;
- int r;
- if (!dma_fence_is_signaled(ring->vmid_wait))
- return amdgpu_sync_fence(sync, ring->vmid_wait);
+ if (!dma_fence_is_signaled(ring->vmid_wait)) {
+ *fence = dma_fence_get(ring->vmid_wait);
+ return 0;
+ }
fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_sync_fence(sync, &array->base);
+ *fence = dma_fence_get(&array->base);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
- return r;
+ return 0;
}
kfree(fences);
@@ -243,17 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
* @job: job who wants to use the VMID
* @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to assign a reserved VMID.
*/
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
struct amdgpu_job *job,
- struct amdgpu_vmid **id)
+ struct amdgpu_vmid **id,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -280,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
- return amdgpu_sync_fence(sync, tmp);
+ *fence = dma_fence_get(tmp);
+ return 0;
}
needs_flush = true;
}
@@ -302,17 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
* @job: job who wants to use the VMID
* @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
struct amdgpu_job *job,
- struct amdgpu_vmid **id)
+ struct amdgpu_vmid **id,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -367,13 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
* @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct amdgpu_job *job)
+ struct amdgpu_job *job, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -383,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
int r = 0;
mutex_lock(&id_mgr->lock);
- r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+ r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
if (r || !idle)
goto error;
if (vm->reserved_vmid[vmhub]) {
- r = amdgpu_vmid_grab_reserved(vm, ring, sync, job, &id);
+ r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
} else {
- r = amdgpu_vmid_grab_used(vm, ring, sync, job, &id);
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
if (r)
goto error;