path: root/drivers/gpu/drm/scheduler/sched_main.c
author	Matthew Brost <matthew.brost@intel.com>	2023-10-30 20:24:38 -0700
committer	Luben Tuikov <ltuikov89@gmail.com>	2023-11-01 17:29:22 -0400
commit	7a36dcfa16a5a7a87f65e03e1a3eb2b5e2fca812 (patch)
tree	2e9e73d5759fbd909674c3af8ecc7a4be850998b	/drivers/gpu/drm/scheduler/sched_main.c
parent	f7fe64ad0f22ff034f8ebcfbd7299ee9cc9b57d7 (diff)
drm/sched: Add drm_sched_start_timeout_unlocked helper
Also add a lockdep assert to drm_sched_start_timeout.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-5-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
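For context, a minimal sketch of the two helpers as they read once this patch is applied, assuming the struct drm_gpu_scheduler fields visible in the hunks below (job_list_lock, pending_list, timeout_wq, work_tdr, timeout). It mirrors the diff and is illustrative only, not an additional change:

	/* Must be entered with sched->job_list_lock held; lockdep_assert_held()
	 * verifies this on lockdep-enabled kernels and compiles out otherwise. */
	static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
	{
		lockdep_assert_held(&sched->job_list_lock);

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
		    !list_empty(&sched->pending_list))
			queue_delayed_work(sched->timeout_wq, &sched->work_tdr,
					   sched->timeout);
	}

	/* Wrapper for call sites that do not already hold job_list_lock,
	 * such as drm_sched_job_timedout() and drm_sched_start() below. */
	static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
	{
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}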
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--	drivers/gpu/drm/scheduler/sched_main.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3b1b2f8eafe8..fc387de5a0c7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -334,11 +334,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
  */
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
+	lockdep_assert_held(&sched->job_list_lock);
+
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !list_empty(&sched->pending_list))
 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 }
 
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+	spin_lock(&sched->job_list_lock);
+	drm_sched_start_timeout(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_fault - immediately start timeout handler
  *
@@ -451,11 +460,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (status != DRM_GPU_SCHED_STAT_ENODEV)
+		drm_sched_start_timeout_unlocked(sched);
 }
 
 /**
@@ -581,11 +587,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			drm_sched_job_done(s_job, -ECANCELED);
 	}
 
-	if (full_recovery) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (full_recovery)
+		drm_sched_start_timeout_unlocked(sched);
 
 	drm_sched_wqueue_start(sched);
 }