path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
author	Christian König <christian.koenig@amd.com>	2016-06-01 10:47:36 +0200
committer	Alex Deucher <alexander.deucher@amd.com>	2016-07-07 14:51:23 -0400
commit	1fbb2e929902ab6e161ebcfb2f4d6de1c4613473 (patch)
tree	12738147113fb6731365826c1bd5e80d1cdf4079 /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
parent	8d76001e0058aca129f599810d4f60e36fb36c5b (diff)
drm/amdgpu: use a fence array for VMID management
Just wait for any fence to become available, instead of waiting for the
last entry of the LRU.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
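For context, the other half of this series (in amdgpu_vm.c) builds a fence
array over the per-VMID fences returned by the new amdgpu_sync_peek_fence()
and blocks on the first one to signal. A minimal sketch of that idea,
assuming the 4.8-era fence_array_create() API with its signal_on_any flag;
adev, ring, fence_context and seqno stand in for the real caller's state and
are not spelled out here:

	struct fence **fences;
	struct fence_array *array;
	struct amdgpu_vm_id *id;
	unsigned i = 0, j;

	/* One slot per VMID; each slot holds the fence still blocking that ID. */
	fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	list_for_each_entry(id, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&id->active, ring);
		if (!fences[i])
			break;	/* an idle VMID exists, no waiting needed */
		++i;
	}

	/* No idle VMID: signal_on_any = true makes the array fence signal as
	 * soon as ANY VMID becomes available, instead of waiting for the
	 * last entry of the LRU.
	 */
	for (j = 0; j < i; ++j)
		fence_get(fences[j]);
	array = fence_array_create(i, fences, fence_context, seqno, true);

The caller then waits on &array->base like any other fence.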
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c	69
1 file changed, 13 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a2766d72b2da..5c8d3022fb87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,16 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
  * @sync: the sync object
  * @ring: optional ring to use for test
  *
- * Returns true if all fences in the sync object are signaled or scheduled to
- * the ring (if provided).
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
  */
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
-			 struct amdgpu_ring *ring)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -246,68 +246,25 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
 			/* For fences from the same ring it is sufficient
 			 * when they are scheduled.
 			 */
-			if (s_fence->sched == &ring->sched &&
-			    fence_is_signaled(&s_fence->scheduled))
-				continue;
-		}
+			if (s_fence->sched == &ring->sched) {
+				if (fence_is_signaled(&s_fence->scheduled))
+					continue;
 
-		if (fence_is_signaled(f)) {
-			hash_del(&e->node);
-			fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
+				return &s_fence->scheduled;
+			}
 		}
 
-		return false;
-	}
-
-	return true;
-}
-
-/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
- *
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
- *
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
- */
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-			     struct fence *fence)
-{
-	struct amdgpu_sync_entry *e, *newone;
-	struct hlist_node *tmp;
-	int i;
-
-	/* Allocate the new entry before moving the old ones */
-	newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
-	if (!newone)
-		return -ENOMEM;
-
-	hash_for_each_safe(src->fences, i, tmp, e, node) {
-		struct fence *f = e->fence;
-
-		hash_del(&e->node);
 		if (fence_is_signaled(f)) {
+			hash_del(&e->node);
 			fence_put(f);
 			kmem_cache_free(amdgpu_sync_slab, e);
 			continue;
 		}
 
-		if (amdgpu_sync_add_later(dst, f)) {
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
-		hash_add(dst->fences, &e->node, f->context);
+		return f;
 	}
 
-	hash_add(src->fences, &newone->node, fence->context);
-	newone->fence = fence_get(fence);
-
-	return 0;
+	return NULL;
 }
 
 /**