author    Dave Airlie <airlied@redhat.com>    2020-01-18 13:00:23 +1000
committer Dave Airlie <airlied@redhat.com>    2020-01-18 13:00:24 +1000
commit    df95968ff78931576ac7a3d3b30312894aaaf22e (patch)
tree      8bd80d7f3314854c52e04d446c3394af4d415a1e /include/drm
parent    71e7274066c646bb3d9da39d2f4db0a6404c0a2d (diff)
parent    7b19914383fc008a6b51871f18da72cf9aa43cae (diff)
Merge tag 'amd-drm-next-5.6-2020-01-17' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.6-2020-01-17:

amdgpu:
- Fix 32 bit harder
- Powerplay cleanups
- VCN fixes for Arcturus
- RAS fixes
- eDP/DP fixes
- SR-IOV fixes
- Re-enable S/G display for PCO/RV2
- Free stolen memory after init on gmc10
- DF hashing optimizations for Arcturus
- Properly handle runtime pm in sysfs and debugfs
- Unify more GC programming between amdgpu and amdkfd
- Golden settings updates for gfx10
- GDDR6 training fixes
- Freesync fixes
- DSC fixes
- TMDS fixes
- Renoir USB-C fixes
- DC dml updates from hw team
- Pollock support
- Mutex init regression fix

amdkfd:
- Unify more GC programming between amdgpu and amdkfd
- Use KIQ to setup HIQ rather than using MMIO

scheduler:
- Documentation fixes
- Improve job distribution with load sharing

drm:
- DP MST fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200117213625.4722-1-alexander.deucher@amd.com
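The scheduler "load sharing" item above corresponds to the change visible in the diff below: the per-scheduler num_jobs counter becomes a score, and entities carry a sched_list of schedulers they may run on. As a minimal sketch of the idea (not the in-tree implementation), a picker over just the fields shown in the diff could look like this; pick_least_loaded() is a hypothetical name:

#include <drm/gpu_scheduler.h>
#include <linux/atomic.h>
#include <linux/limits.h>

/*
 * Sketch only: among the schedulers an entity may run on, choose the
 * ready one with the lowest score.  Uses only the ->score and ->ready
 * fields that appear in this merge.
 */
static struct drm_gpu_scheduler *
pick_least_loaded(struct drm_gpu_scheduler **sched_list,
		  unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *best = NULL;
	int best_score = INT_MAX;
	unsigned int i;

	for (i = 0; i < num_sched_list; i++) {
		int score = atomic_read(&sched_list[i]->score);

		if (!sched_list[i]->ready)
			continue;
		if (score < best_score) {
			best_score = score;
			best = sched_list[i];
		}
	}

	return best;
}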
Diffstat (limited to 'include/drm')
-rw-r--r--    include/drm/gpu_scheduler.h    16
1 file changed, 8 insertions, 8 deletions
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 96a1a1b7526e..9e71be129c30 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,9 +52,9 @@ enum drm_sched_priority {
* @list: used to append this struct to the list of entities in the
* runqueue.
* @rq: runqueue on which this entity is currently scheduled.
- * @rq_list: a list of run queues on which jobs from this entity can
- * be scheduled
- * @num_rq_list: number of run queues in the rq_list
+ * @sched_list: a list of drm_gpu_schedulers on which jobs from this entity can
+ *              be scheduled
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -81,8 +81,8 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
- unsigned int num_sched_list;
struct drm_gpu_scheduler **sched_list;
+ unsigned int num_sched_list;
enum drm_sched_priority priority;
spinlock_t rq_lock;
@@ -261,7 +261,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle sched
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -282,8 +282,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t num_jobs;
- bool ready;
+ atomic_t score;
+ bool ready;
bool free_guilty;
};
@@ -315,7 +315,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
struct drm_gpu_scheduler **sched_list,
- unsigned int num_rq_list,
+ unsigned int num_sched_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
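After this change, drivers pass the scheduler list and its length to drm_sched_entity_init() under the new names. A hedged usage sketch, assuming a caller that owns a sched_list outliving the entity; example_entity_setup() is a hypothetical helper and the optional guilty pointer is left NULL:

#include <drm/gpu_scheduler.h>

/*
 * Usage sketch for the renamed parameters.  The sched_list passed in
 * is assumed to remain valid for the lifetime of the entity.
 */
static int example_entity_setup(struct drm_sched_entity *entity,
				struct drm_gpu_scheduler **sched_list,
				unsigned int num_sched_list)
{
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, num_sched_list,
				     NULL /* guilty */);
}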