Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
 drivers/gpu/drm/scheduler/sched_main.c | 44 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 92d8de24d0a1..a2a953693b45 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -314,6 +314,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
+	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -331,7 +332,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
-		job->sched->ops->timedout_job(job);
+		status = job->sched->ops->timedout_job(job);
 
 		/*
 		 * Guilty job did complete and hence needs to be manually removed
@@ -345,9 +346,11 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	spin_lock(&sched->job_list_lock);
-	drm_sched_start_timeout(sched);
-	spin_unlock(&sched->job_list_lock);
+	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
+		spin_lock(&sched->job_list_lock);
+		drm_sched_start_timeout(sched);
+		spin_unlock(&sched->job_list_lock);
+	}
 }
 
 /**
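With timedout_job() now returning an enum drm_gpu_sched_stat, a driver can tell the scheduler that the device is gone, and the hunk above then skips re-arming the timeout timer. A minimal sketch of a driver-side handler under that contract; the foo_* names are hypothetical, only the callback signature and the enum values come from the scheduler API:

static enum drm_gpu_sched_stat
foo_timedout_job(struct drm_sched_job *sched_job)
{
	struct foo_device *fdev = foo_job_device(sched_job);	/* hypothetical */

	/* Device unplugged: no point re-arming the timeout timer. */
	if (foo_device_unplugged(fdev))				/* hypothetical */
		return DRM_GPU_SCHED_STAT_ENODEV;

	/* Otherwise attempt recovery and keep the watchdog running. */
	foo_gpu_reset(fdev);					/* hypothetical */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}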
@@ -671,7 +674,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 static struct drm_sched_job *
 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
-	struct drm_sched_job *job;
+	struct drm_sched_job *job, *next;
 
 	/*
 	 * Don't destroy jobs while the timeout worker is running OR thread
@@ -690,6 +693,13 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from pending_list */
 		list_del_init(&job->list);
+		/* make the scheduled timestamp more accurate */
+		next = list_first_entry_or_null(&sched->pending_list,
+						typeof(*next), list);
+		if (next)
+			next->s_fence->scheduled.timestamp =
+				job->s_fence->finished.timestamp;
+
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
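The timestamp fix above matters because a job's scheduled fence signals when the job is handed to the hardware queue, so time spent waiting in the ring behind the previous job would otherwise be billed as run time; copying the predecessor's finished timestamp makes the scheduled-to-finished delta reflect actual GPU time. A hypothetical helper (not part of this patch) that relies on the corrected timestamps:

/* Hypothetical, for illustration only: GPU time consumed by a job.
 * Only meaningful once the finished fence has signaled. */
static ktime_t foo_job_gpu_time(struct drm_sched_job *job)
{
	struct drm_sched_fence *s_fence = job->s_fence;

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}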
@@ -888,9 +898,33 @@ EXPORT_SYMBOL(drm_sched_init);
  */
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
+	struct drm_sched_entity *s_entity;
+	int i;
+
 	if (sched->thread)
 		kthread_stop(sched->thread);
 
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+		if (!rq)
+			continue;
+
+		spin_lock(&rq->lock);
+		list_for_each_entry(s_entity, &rq->entities, list)
+			/*
+			 * Prevents reinsertion and marks job_queue as idle;
+			 * it will be removed from rq in
+			 * drm_sched_entity_fini() eventually.
+			 */
+			s_entity->stopped = true;
+		spin_unlock(&rq->lock);
+
+	}
+
+	/* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
+	wake_up_all(&sched->job_scheduled);
+
 	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&sched->work_tdr);
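For context on the wake_up_all() above: drm_sched_entity_flush() blocks in wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity)), and entity->stopped is one of the conditions that idle check accepts, so marking every entity stopped and then waking the queue releases any flusher stuck on a dying scheduler. Paraphrased from drm_sched_entity.c of the same series (not part of this sched_main.c diff), to show the condition being satisfied:

/* Paraphrase, for context; see drm_sched_entity.c for the real code. */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}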