 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
 include/drm/gpu_scheduler.h               | 2 ++
 2 files changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index afffb9a60cdb..3b9969190927 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -530,6 +530,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	trace_drm_sched_job(sched_job, entity);
+	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -821,6 +822,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->num_jobs);
 	drm_sched_fence_finished(s_fence);
 	trace_drm_sched_process_job(s_fence);
@@ -938,6 +940,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 	/* Each scheduler will run on a separate kernel thread */
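
Taken together, the gpu_scheduler.c hunks give each scheduler a running count of in-flight jobs: the counter starts at zero in drm_sched_init(), is incremented when a job is pushed in drm_sched_entity_push_job(), and is decremented when the job's fence completes in drm_sched_process_job(). A balanced submit/complete path therefore returns the counter to zero at idle. A minimal, hypothetical sanity check (not part of this patch) that could sit on a scheduler teardown path:

	/*
	 * Hypothetical debug assertion, not in this patch: by teardown,
	 * every job counted in by drm_sched_entity_push_job() should
	 * have been counted out by drm_sched_process_job(), leaving
	 * num_jobs at zero.
	 */
	WARN_ON(atomic_read(&sched->num_jobs) != 0);
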
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2419887e25eb..0c4cfe689d4c 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -262,6 +262,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will no more be considered for scheduling.
+ * @num_jobs: the number of jobs currently queued in the scheduler
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -279,6 +280,7 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
+	atomic_t			num_jobs;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
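
A per-scheduler job count is the kind of statistic a driver can read to balance load across rings. A minimal sketch of such a consumer, assuming a caller-provided array of candidate schedulers; the helper name and the sched_list/num_scheds parameters are hypothetical and not part of this patch:

	#include <linux/atomic.h>
	#include <linux/limits.h>
	#include <drm/gpu_scheduler.h>

	/*
	 * Illustrative sketch only, not part of this patch: pick the
	 * scheduler with the fewest queued jobs from a candidate list.
	 */
	static struct drm_gpu_scheduler *
	pick_least_loaded(struct drm_gpu_scheduler **sched_list,
			  unsigned int num_scheds)
	{
		struct drm_gpu_scheduler *best = NULL;
		unsigned int min_jobs = UINT_MAX;
		unsigned int i;

		for (i = 0; i < num_scheds; i++) {
			/*
			 * atomic_read() gives a momentary snapshot; the
			 * count can change right after, which is fine for
			 * a heuristic load-balancing decision.
			 */
			unsigned int jobs = atomic_read(&sched_list[i]->num_jobs);

			if (jobs < min_jobs) {
				min_jobs = jobs;
				best = sched_list[i];
			}
		}

		return best;
	}

Note the design choice the patch makes possible: because num_jobs is an atomic_t touched only on the push and completion paths, a reader like the sketch above needs no locking, at the cost of the value being a snapshot rather than an exact count.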