| field | value | date |
|---|---|---|
| author | Matthew Brost <matthew.brost@intel.com> | 2023-10-30 20:24:37 -0700 |
| committer | Luben Tuikov <ltuikov89@gmail.com> | 2023-11-01 17:29:21 -0400 |
| commit | f7fe64ad0f22ff034f8ebcfbd7299ee9cc9b57d7 (patch) | |
| tree | fe5c8e7dabcaf9a34e039a27f66e3b60524fd948 /drivers/gpu/drm/scheduler | |
| parent | a6149f0393699308fb00149be913044977bceb56 (diff) | |
drm/sched: Split free_job into own work item
Rather than calling free_job and run_job in the same work item, give each
its own dedicated work item. This aligns with the design and intended use
of work queues (see the sketch following the diffstat below).
v2:
- Test for DMA_FENCE_FLAG_TIMESTAMP_BIT before setting
timestamp in free_job() work item (Danilo)
v3:
- Drop forward declaration of drm_sched_select_entity (Boris)
- Return in drm_sched_run_job_work if entity NULL (Boris)
v4:
- Replace dequeue with peek and invert logic (Luben)
- Wrap to 100 lines (Luben)
- Update comments for *_queue / *_queue_if_ready functions (Luben)
v5:
- Drop peek argument, blindly reinit idle (Luben)
- s/drm_sched_free_job_queue_if_ready/drm_sched_free_job_queue_if_done (Luben)
- Update work_run_job & work_free_job kernel doc (Luben)
v6:
- Do not move drm_sched_select_entity in file (Luben)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-4-matthew.brost@intel.com
Reviewed-by: Luben Tuikov <ltuikov89@gmail.com>
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r-- | drivers/gpu/drm/scheduler/sched_main.c | 146 |
1 file changed, 98 insertions(+), 48 deletions(-)
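The core of the change is a "one work item per concern" pattern: run_job and free_job each get their own work_struct on the scheduler's submit_wq, and each worker re-queues itself only when it can verify more work exists. Below is a minimal sketch of that pattern; the drm_gpu_scheduler field names (submit_wq, work_run_job, work_free_job, pause_submit) follow the patch, while the toy_* names and stub bodies are purely illustrative:

```c
#include <linux/workqueue.h>

/* Illustrative stand-in for drm_gpu_scheduler; not the kernel struct. */
struct toy_sched {
	struct workqueue_struct	*submit_wq;
	struct work_struct	work_run_job;	/* submits one job to HW */
	struct work_struct	work_free_job;	/* frees one finished job */
	bool			pause_submit;
};

static void toy_run_job_work(struct work_struct *w)
{
	struct toy_sched *s = container_of(w, struct toy_sched, work_run_job);

	if (READ_ONCE(s->pause_submit))
		return;
	/* ... pick an entity, run one job, then re-queue this work only
	 * if more runnable work exists (the _if_ready pattern) ... */
}

static void toy_free_job_work(struct work_struct *w)
{
	struct toy_sched *s = container_of(w, struct toy_sched, work_free_job);

	if (READ_ONCE(s->pause_submit))
		return;
	/* ... free one signaled job, then re-queue this work only if the
	 * next pending job is already done (the _if_done pattern) ... */
}

static void toy_sched_init(struct toy_sched *s)
{
	INIT_WORK(&s->work_run_job, toy_run_job_work);
	INIT_WORK(&s->work_free_job, toy_free_job_work);
}
```

The point of the split is isolation: a slow free_job callback no longer delays the next run_job submission, and vice versa, while the self-re-queueing keeps each worker processing one item at a time.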
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d1ae05bded15..3b1b2f8eafe8 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -266,6 +266,32 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
 }
 
 /**
+ * drm_sched_free_job_queue - enqueue free-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
+{
+	if (!READ_ONCE(sched->pause_submit))
+		queue_work(sched->submit_wq, &sched->work_free_job);
+}
+
+/**
+ * drm_sched_free_job_queue_if_done - enqueue free-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *job;
+
+	spin_lock(&sched->job_list_lock);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
+	if (job && dma_fence_is_signaled(&job->s_fence->finished))
+		drm_sched_free_job_queue(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
+/**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
  *
@@ -284,7 +310,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
-	drm_sched_run_job_queue(sched);
+	drm_sched_free_job_queue(sched);
 }
 
 /**
@@ -943,8 +969,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 						typeof(*next), list);
 
 		if (next) {
-			next->s_fence->scheduled.timestamp =
-				dma_fence_timestamp(&job->s_fence->finished);
+			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+				     &next->s_fence->scheduled.flags))
+				next->s_fence->scheduled.timestamp =
+					dma_fence_timestamp(&job->s_fence->finished);
 			/* start TO timer for next job */
 			drm_sched_start_timeout(sched);
 		}
@@ -994,7 +1022,40 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 EXPORT_SYMBOL(drm_sched_pick_best);
 
 /**
- * drm_sched_run_job_work - main scheduler thread
+ * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
+{
+	if (drm_sched_select_entity(sched))
+		drm_sched_run_job_queue(sched);
+}
+
+/**
+ * drm_sched_free_job_work - worker to call free_job
+ *
+ * @w: free job work
+ */
+static void drm_sched_free_job_work(struct work_struct *w)
+{
+	struct drm_gpu_scheduler *sched =
+		container_of(w, struct drm_gpu_scheduler, work_free_job);
+	struct drm_sched_job *cleanup_job;
+
+	if (READ_ONCE(sched->pause_submit))
+		return;
+
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	if (cleanup_job) {
+		sched->ops->free_job(cleanup_job);
+
+		drm_sched_free_job_queue_if_done(sched);
+		drm_sched_run_job_queue_if_ready(sched);
+	}
+}
+
+/**
+ * drm_sched_run_job_work - worker to call run_job
  *
  * @w: run job work
  */
@@ -1003,65 +1064,51 @@ static void drm_sched_run_job_work(struct work_struct *w)
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_run_job);
 	struct drm_sched_entity *entity;
-	struct drm_sched_job *cleanup_job;
+	struct dma_fence *fence;
+	struct drm_sched_fence *s_fence;
+	struct drm_sched_job *sched_job;
 	int r;
 
 	if (READ_ONCE(sched->pause_submit))
 		return;
 
-	cleanup_job = drm_sched_get_cleanup_job(sched);
 	entity = drm_sched_select_entity(sched);
+	if (!entity)
+		return;
 
-	if (!entity && !cleanup_job)
+	sched_job = drm_sched_entity_pop_job(entity);
+	if (!sched_job) {
+		complete_all(&entity->entity_idle);
 		return;	/* No more work */
+	}
 
-	if (cleanup_job)
-		sched->ops->free_job(cleanup_job);
-
-	if (entity) {
-		struct dma_fence *fence;
-		struct drm_sched_fence *s_fence;
-		struct drm_sched_job *sched_job;
-
-		sched_job = drm_sched_entity_pop_job(entity);
-		if (!sched_job) {
-			complete_all(&entity->entity_idle);
-			if (!cleanup_job)
-				return;	/* No more work */
-			goto again;
-		}
-
-		s_fence = sched_job->s_fence;
-
-		atomic_inc(&sched->hw_rq_count);
-		drm_sched_job_begin(sched_job);
+	s_fence = sched_job->s_fence;
 
-		trace_drm_run_job(sched_job, entity);
-		fence = sched->ops->run_job(sched_job);
-		complete_all(&entity->entity_idle);
-		drm_sched_fence_scheduled(s_fence, fence);
+	atomic_inc(&sched->hw_rq_count);
+	drm_sched_job_begin(sched_job);
 
-		if (!IS_ERR_OR_NULL(fence)) {
-			/* Drop for original kref_init of the fence */
-			dma_fence_put(fence);
+	trace_drm_run_job(sched_job, entity);
+	fence = sched->ops->run_job(sched_job);
+	complete_all(&entity->entity_idle);
+	drm_sched_fence_scheduled(s_fence, fence);
 
-			r = dma_fence_add_callback(fence, &sched_job->cb,
-						   drm_sched_job_done_cb);
-			if (r == -ENOENT)
-				drm_sched_job_done(sched_job, fence->error);
-			else if (r)
-				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
-					      r);
-		} else {
-			drm_sched_job_done(sched_job, IS_ERR(fence) ?
-					   PTR_ERR(fence) : 0);
-		}
+	if (!IS_ERR_OR_NULL(fence)) {
+		/* Drop for original kref_init of the fence */
+		dma_fence_put(fence);
 
-		wake_up(&sched->job_scheduled);
+		r = dma_fence_add_callback(fence, &sched_job->cb,
+					   drm_sched_job_done_cb);
+		if (r == -ENOENT)
+			drm_sched_job_done(sched_job, fence->error);
+		else if (r)
+			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+	} else {
+		drm_sched_job_done(sched_job, IS_ERR(fence) ?
+				   PTR_ERR(fence) : 0);
 	}
 
-again:
-	drm_sched_run_job_queue(sched);
+	wake_up(&sched->job_scheduled);
+	drm_sched_run_job_queue_if_ready(sched);
 }
 
 /**
@@ -1145,6 +1192,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
+	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
 	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 	sched->pause_submit = false;
@@ -1274,6 +1322,7 @@ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
 {
 	WRITE_ONCE(sched->pause_submit, true);
 	cancel_work_sync(&sched->work_run_job);
+	cancel_work_sync(&sched->work_free_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_stop);
 
@@ -1286,5 +1335,6 @@ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
 {
 	WRITE_ONCE(sched->pause_submit, false);
 	queue_work(sched->submit_wq, &sched->work_run_job);
+	queue_work(sched->submit_wq, &sched->work_free_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_start);
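For driver authors, the observable change is where the callbacks run: .run_job is invoked from drm_sched_run_job_work() and .free_job from drm_sched_free_job_work(), two distinct work items that may execute concurrently depending on the workqueue. A hypothetical backend illustrating this (sketch only; toy_job, to_toy_job() and toy_hw_submit() are invented names, while drm_sched_backend_ops and drm_sched_job_cleanup() are the real API):

```c
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

/* Made-up driver job type wrapping the scheduler job. */
struct toy_job {
	struct drm_sched_job base;
};

static inline struct toy_job *to_toy_job(struct drm_sched_job *job)
{
	return container_of(job, struct toy_job, base);
}

/* Assumed to exist in the driver: pushes a job, returns its HW fence. */
struct dma_fence *toy_hw_submit(struct toy_job *job);

static struct dma_fence *toy_run_job(struct drm_sched_job *sched_job)
{
	/* With this patch, called from drm_sched_run_job_work(). */
	return toy_hw_submit(to_toy_job(sched_job));
}

static void toy_free_job(struct drm_sched_job *sched_job)
{
	struct toy_job *job = to_toy_job(sched_job);

	/* With this patch, called from drm_sched_free_job_work(), no
	 * longer from the same work item that submitted the job. */
	drm_sched_job_cleanup(sched_job);
	kfree(job);
}

static const struct drm_sched_backend_ops toy_ops = {
	.run_job  = toy_run_job,
	.free_job = toy_free_job,
};
```

Since the two callbacks no longer share a work item, a backend should not rely on free_job being serialized behind the most recent run_job on one worker; any such ordering now has to come from the backend's own locking or from the workqueue it passes to drm_sched_init().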