path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
author    Andres Rodriguez <andresx7@gmail.com>      2017-03-06 16:27:55 -0500
committer Alex Deucher <alexander.deucher@amd.com>   2017-05-31 16:49:02 -0400
commit    795f2813e628bcf57a69f2dfe413360d14a1d7f4 (patch)
tree      f60bfe602590fde4bd170c263a569cd8147ffdd0 /drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
parent    effd924d2f3b9c52d5bd8137c3803e83f719a290 (diff)
drm/amdgpu: implement lru amdgpu_queue_mgr policy for compute v4
Use an LRU policy to map usermode rings to HW compute queues.

Most compute clients use one queue, and usually the first queue available.
This results in poor pipe/queue work distribution when multiple compute apps
are running. In most cases pipe 0 queue 0 is the only queue that gets used.

In order to better distribute work across multiple HW queues, we adopt a
policy to map the usermode ring ids to the LRU HW queue. This fixes a large
majority of multi-app compute workloads sharing the same HW queue, even
though 7 other queues are available.

v2: use ring->funcs->type instead of ring->hw_ip
v3: remove amdgpu_queue_mapper_funcs
v4: change ring_lru_list_lock to spinlock, grab only once in lru_get()

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
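To illustrate the policy described above (this is not the patch's code): the short standalone C sketch below picks the least recently used queue for each new client and then marks it as just used, so consecutive single-queue clients spread across all queues instead of piling onto pipe 0 queue 0. All names in it (hw_queue, lru_select, NUM_QUEUES) are invented for the example.

/*
 * Minimal, self-contained sketch of an LRU queue-selection policy.
 * Not the amdgpu code; the real implementation keeps rings on a
 * lock-protected list in amdgpu_ring.c.
 */
#include <stdio.h>

#define NUM_QUEUES 8

struct hw_queue {
	int id;
	unsigned long last_used;	/* monotonic "time" of last use */
};

static struct hw_queue queues[NUM_QUEUES];
static unsigned long tick;

/* Pick the least recently used queue and mark it as just used. */
static struct hw_queue *lru_select(void)
{
	struct hw_queue *best = &queues[0];
	int i;

	for (i = 1; i < NUM_QUEUES; i++)
		if (queues[i].last_used < best->last_used)
			best = &queues[i];

	best->last_used = ++tick;	/* "touch": now most recently used */
	return best;
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_QUEUES; i++)
		queues[i].id = i;

	/* Four usermode rings land on four different HW queues instead of
	 * all mapping to queue 0. */
	for (i = 0; i < 4; i++)
		printf("usermode ring %d -> hw queue %d\n", i, lru_select()->id);

	return 0;
}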
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 334307efac8b..577528a9af0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -154,6 +154,7 @@ struct amdgpu_ring {
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
 	struct amd_gpu_scheduler	sched;
+	struct list_head		lru_list;
 
 	struct amdgpu_bo		*ring_obj;
 	volatile uint32_t		*ring;
@@ -200,6 +201,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
 		     unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int hw_ip,
+			struct amdgpu_ring **ring);
+void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 {
 	int i = 0;
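For context, a hedged, standalone sketch of the list-based "take the LRU ring and move it to the tail" mechanism that the two prototypes above and the v4 note (lock grabbed only once in lru_get()) imply. It is not the amdgpu implementation: demo_ring, demo_ring_lru_get() and demo_ring_lru_touch_locked() are stand-in names, and a pthread mutex stands in for the kernel spinlock.

/*
 * Standalone sketch: keep rings on a doubly linked list ordered from
 * least to most recently used; the get path takes the lock once, finds
 * the first ring of the requested type, and moves it to the tail.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_ring {
	int id;
	int hw_ip;			/* ring type this ring serves */
	struct demo_ring *prev, *next;	/* LRU list node, head = least recently used */
};

static struct demo_ring *lru_head, *lru_tail;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink @ring and append it at the tail (most recently used). */
static void demo_ring_lru_touch_locked(struct demo_ring *ring)
{
	if (ring == lru_tail)
		return;
	if (ring->prev)
		ring->prev->next = ring->next;
	else
		lru_head = ring->next;
	ring->next->prev = ring->prev;

	ring->prev = lru_tail;
	ring->next = NULL;
	lru_tail->next = ring;
	lru_tail = ring;
}

/* Return the least recently used ring of the requested type. */
static int demo_ring_lru_get(int hw_ip, struct demo_ring **out)
{
	struct demo_ring *ring;
	int ret = -1;

	pthread_mutex_lock(&lru_lock);
	for (ring = lru_head; ring; ring = ring->next) {
		if (ring->hw_ip != hw_ip)
			continue;
		demo_ring_lru_touch_locked(ring);
		*out = ring;
		ret = 0;
		break;
	}
	pthread_mutex_unlock(&lru_lock);
	return ret;
}

int main(void)
{
	static struct demo_ring rings[4];
	struct demo_ring *r;
	int i;

	/* Build the initial list: ring 0 is least recently used. */
	for (i = 0; i < 4; i++) {
		rings[i].id = i;
		rings[i].hw_ip = 0;
		rings[i].prev = i ? &rings[i - 1] : NULL;
		rings[i].next = i < 3 ? &rings[i + 1] : NULL;
	}
	lru_head = &rings[0];
	lru_tail = &rings[3];

	/* Successive lookups walk across all rings instead of reusing one. */
	for (i = 0; i < 4; i++)
		if (!demo_ring_lru_get(0, &r))
			printf("mapped to ring %d\n", r->id);

	return 0;
}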