Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 294
1 file changed, 169 insertions(+), 125 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 550492a7a031..829579c41c6b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,7 +41,7 @@
* 4. Entities themselves maintain a queue of jobs that will be scheduled on
* the hardware.
*
- * The jobs in a entity are always scheduled in the order that they were pushed.
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
*
* Note that once a job has been taken from the entity's queue and pushed to the
* hardware, i.e. the pending queue, the entity must not be referenced anymore
@@ -64,12 +64,6 @@
* credit limit, the job won't be executed. Instead, the scheduler will wait
* until the credit count has decreased enough to not overflow its credit limit.
* This implies waiting for previously executed jobs.
- *
- * Optionally, drivers may register a callback (update_job_credits) provided by
- * struct drm_sched_backend_ops to update the job's credits dynamically. The
- * scheduler executes this callback every time the scheduler considers a job for
- * execution and subsequently checks whether the job fits the scheduler's credit
- * limit.
*/
#include <linux/wait.h>
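For illustration, a minimal sketch of the credit model described in the comment above. All foo_* names are hypothetical, and the (job, entity, credits, owner) form of drm_sched_job_init() used here follows this kernel version; other trees may differ. The idea: use the ring size as the scheduler's credit limit and charge each job the number of ring slots it occupies, so the scheduler only runs a job once earlier jobs have freed enough ring space.

	#include <drm/gpu_scheduler.h>

	/* Hypothetical driver types; only what the sketch needs. */
	struct foo_device { struct drm_gpu_scheduler sched; };
	struct foo_job { struct drm_sched_job base; u32 num_ring_slots; };

	static int foo_job_init(struct foo_device *fdev, struct foo_job *fjob,
				struct drm_sched_entity *entity)
	{
		/* Per-job credits: the ring slots this job will occupy. */
		return drm_sched_job_init(&fjob->base, entity,
					  fjob->num_ring_slots, fdev);
	}

The matching credit_limit (the ring size) is set at drm_sched_init() time; see the sketch further down next to the drm_sched_init() changes.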
@@ -84,11 +78,16 @@
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
+#include "sched_internal.h"
+
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+ .name = "drm_sched_lockdep_map"
+};
+#endif
int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
@@ -103,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
{
u32 credits;
- drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
- atomic_read(&sched->credit_count),
- &credits));
+ WARN_ON(check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
return credits;
}
@@ -123,23 +122,18 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
{
struct drm_sched_job *s_job;
- s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ s_job = drm_sched_entity_queue_peek(entity);
if (!s_job)
return false;
- if (sched->ops->update_job_credits) {
- s_job->credits = sched->ops->update_job_credits(s_job);
-
- drm_WARN(sched, !s_job->credits,
- "Jobs with zero credits bypass job-flow control.\n");
- }
-
/* If a job exceeds the credit limit, truncate it to the credit limit
* itself to guarantee forward progress.
*/
- if (drm_WARN(sched, s_job->credits > sched->credit_limit,
- "Jobs may not exceed the credit limit, truncate.\n"))
+ if (s_job->credits > sched->credit_limit) {
+ dev_WARN(sched->dev,
+ "Jobs may not exceed the credit limit, truncate.\n");
s_job->credits = sched->credit_limit;
+ }
return drm_sched_available_credits(sched) >= s_job->credits;
}
@@ -153,35 +147,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}
-static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
{
- struct drm_sched_rq *rq = entity->rq;
-
if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
RB_CLEAR_NODE(&entity->rb_tree_node);
}
}
-void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ ktime_t ts)
{
/*
* Both locks need to be grabbed, one to protect from entity->rq change
* for entity from within concurrent drm_sched_entity_select_rq and the
* other to update the rb tree structure.
*/
- spin_lock(&entity->rq_lock);
- spin_lock(&entity->rq->lock);
+ lockdep_assert_held(&entity->lock);
+ lockdep_assert_held(&rq->lock);
- drm_sched_rq_remove_fifo_locked(entity);
+ drm_sched_rq_remove_fifo_locked(entity, rq);
entity->oldest_job_waiting = ts;
- rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+ rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
drm_sched_entity_compare_before);
-
- spin_unlock(&entity->rq->lock);
- spin_unlock(&entity->rq_lock);
}
/**
@@ -213,15 +205,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
+ lockdep_assert_held(&entity->lock);
+ lockdep_assert_held(&rq->lock);
+
if (!list_empty(&entity->list))
return;
- spin_lock(&rq->lock);
-
atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
-
- spin_unlock(&rq->lock);
}
/**
@@ -235,6 +226,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
+ lockdep_assert_held(&entity->lock);
+
if (list_empty(&entity->list))
return;
@@ -247,7 +240,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
rq->current_entity = NULL;
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_remove_fifo_locked(entity);
+ drm_sched_rq_remove_fifo_locked(entity, rq);
spin_unlock(&rq->lock);
}
@@ -349,7 +342,6 @@ drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
return ERR_PTR(-ENOSPC);
}
- rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
break;
}
@@ -595,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
 * caller's responsibility to release it manually if it's not part of the
* pending list any more.
*
+ * This function is typically used for reset recovery (see the documentation of
+ * drm_sched_backend_ops.timedout_job() for details). Do not call it for
+ * scheduler teardown, i.e., before calling drm_sched_fini().
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -667,20 +662,22 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
*/
cancel_delayed_work(&sched->work_tdr);
}
-
EXPORT_SYMBOL(drm_sched_stop);
/**
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
- * @full_recovery: proceed with complete sched restart
+ * @errno: error to set on the pending fences
*
+ * This function is typically used for reset recovery (see the documentation of
+ * drm_sched_backend_ops.timedout_job() for details). Do not call it for
+ * scheduler startup. The scheduler itself is fully operational after
+ * drm_sched_init() succeeded.
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
- int r;
/*
* Locking the list is not required here as the sched thread is parked
@@ -692,24 +689,17 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
atomic_add(s_job->credits, &sched->credit_count);
- if (!full_recovery)
+ if (!fence) {
+ drm_sched_job_done(s_job, errno ?: -ECANCELED);
continue;
+ }
- if (fence) {
- r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(s_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else
- drm_sched_job_done(s_job, -ECANCELED);
+ if (dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb))
+ drm_sched_job_done(s_job, fence->error ?: errno);
}
- if (full_recovery)
- drm_sched_start_timeout_unlocked(sched);
-
+ drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
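For context, a hedged sketch of the reset-recovery flow that drm_sched_stop() and drm_sched_start() are documented for above. The foo_* names and the hardware reset step are placeholders, karma/guilt handling is omitted, and the status value returned follows the enum as of this kernel version.

	/* Hypothetical timeout handler pairing drm_sched_stop() and drm_sched_start(). */
	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
	{
		struct drm_gpu_scheduler *sched = bad->sched;
		struct foo_device *fdev = container_of(sched, struct foo_device, sched);

		/* Park the scheduler and detach the done callbacks of pending jobs. */
		drm_sched_stop(sched, bad);

		foo_reset_ring(fdev);	/* placeholder: driver-specific reset */

		/* Re-arm callbacks on pending jobs (or complete fence-less ones with
		 * -ECANCELED, since errno is 0 here) and restart submission.
		 */
		drm_sched_start(sched, 0);

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}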
@@ -781,6 +771,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
 * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
* successfully, even when @job is aborted before drm_sched_job_arm() is called.
*
+ * Note that this function does not assign a valid value to each struct member
+ * of struct drm_sched_job. Take a look at that struct's documentation to see
+ * who sets which struct member with what lifetime.
+ *
* WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
* has died, which can mean that there's no valid runqueue for a @entity.
* This function returns -ENOENT in this case (which probably should be -EIO as
@@ -797,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
* or worse--a blank screen--leave a trail in the
* logs, so this can be debugged easier.
*/
- drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+ dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
return -ENOENT;
}
@@ -806,6 +800,14 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -EINVAL;
}
+ /*
+ * We don't know for sure how the user has allocated. Thus, zero the
+ * struct so that unallowed (i.e., too early) usage of pointers that
+ * this function does not set is guaranteed to lead to a NULL pointer
+ * exception instead of UB.
+ */
+ memset(job, 0, sizeof(*job));
+
job->entity = entity;
job->credits = credits;
job->s_fence = drm_sched_fence_alloc(entity, owner);
@@ -826,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function has been called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
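Putting the notes above together, a hedged sketch of the required init/arm/push ordering, reusing the hypothetical struct foo_job from the sketch near the top of the file. Everything that may still fail must happen between drm_sched_job_init() and drm_sched_job_arm(); once armed, the job must be pushed.

	/* Hypothetical submission path; foo_prepare_job() is a placeholder. */
	static struct dma_fence *foo_submit(struct foo_job *fjob,
					    struct drm_sched_entity *entity,
					    void *owner)
	{
		struct dma_fence *finished;
		int ret;

		ret = drm_sched_job_init(&fjob->base, entity, 1 /* credits */, owner);
		if (ret)
			return ERR_PTR(ret);

		ret = foo_prepare_job(fjob);	/* e.g. dependency / BO setup */
		if (ret) {
			/* Not armed yet, so aborting with cleanup is still allowed. */
			drm_sched_job_cleanup(&fjob->base);
			return ERR_PTR(ret);
		}

		drm_sched_job_arm(&fjob->base);		/* point of no return */

		/* Grab the finished fence for the caller, then push; the job must
		 * not be touched after drm_sched_entity_push_job() returns.
		 */
		finished = dma_fence_get(&fjob->base.s_fence->finished);
		drm_sched_entity_push_job(&fjob->base);

		return finished;
	}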
@@ -984,6 +990,29 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
+ * drm_sched_job_has_dependency - check whether fence is the job's dependency
+ * @job: scheduler job to check
+ * @fence: fence to look for
+ *
+ * Returns:
+ * True if @fence is found within the job's dependencies, or otherwise false.
+ */
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *f;
+ unsigned long index;
+
+ xa_for_each(&job->dependencies, index, f) {
+ if (f == fence)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_sched_job_has_dependency);
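One possible use of this new helper, hedged and with hypothetical surroundings: skip adding a fence the job already depends on, e.g. when collecting dependencies from several sources.

	static int foo_add_unique_dependency(struct drm_sched_job *job,
					     struct dma_fence *fence)
	{
		if (drm_sched_job_has_dependency(job, fence))
			return 0;

		/* drm_sched_job_add_dependency() consumes the reference passed in,
		 * so take one for it here and keep our own.
		 */
		return drm_sched_job_add_dependency(job, dma_fence_get(fence));
	}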
+
+/**
* drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
*
@@ -992,9 +1021,12 @@ EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
* Drivers should call this from their error unwind code if @job is aborted
* before drm_sched_job_arm() is called.
*
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence numbers etc. Once that function has been called, you *must*
+ * submit the job with drm_sched_entity_push_job() and cannot simply abort it by
+ * calling drm_sched_job_cleanup().
+ *
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
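A hedged sketch of the free_job() side referred to above, again with the hypothetical struct foo_job: release the scheduler-side resources via drm_sched_job_cleanup() before freeing the driver's own job object.

	static void foo_free_job(struct drm_sched_job *sched_job)
	{
		struct foo_job *fjob = container_of(sched_job, struct foo_job, base);

		drm_sched_job_cleanup(sched_job);
		kfree(fjob);
	}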
@@ -1002,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before committing to run it */
+ /* The job was aborted before it was committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
@@ -1022,15 +1059,12 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
* drm_sched_wakeup - Wake up the scheduler if it is ready to queue
* @sched: scheduler instance
- * @entity: the scheduler entity
*
* Wake up the scheduler if we can queue jobs.
*/
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
- if (drm_sched_can_queue(sched, entity))
- drm_sched_run_job_queue(sched);
+ drm_sched_run_job_queue(sched);
}
/**
@@ -1155,9 +1189,6 @@ static void drm_sched_free_job_work(struct work_struct *w)
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
- if (READ_ONCE(sched->pause_submit))
- return;
-
job = drm_sched_get_finished_job(sched);
if (job)
sched->ops->free_job(job);
@@ -1181,17 +1212,16 @@ static void drm_sched_run_job_work(struct work_struct *w)
struct drm_sched_job *sched_job;
int r;
- if (READ_ONCE(sched->pause_submit))
- return;
-
+ /* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
- return;
+ return; /* No more work */
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
complete_all(&entity->entity_idle);
- return; /* No more work */
+ drm_sched_run_job_queue(sched);
+ return;
}
s_fence = sched_job->s_fence;
@@ -1200,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_job_begin(sched_job);
trace_drm_run_job(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
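The new comment above spells out the run_job() reference contract; here is a hedged sketch of a driver callback honoring it. foo_ring_emit() is a placeholder that is assumed to keep its own reference to the fence (e.g. in the ring).

	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
	{
		struct foo_job *fjob = container_of(sched_job, struct foo_job, base);
		struct dma_fence *hw_fence;

		hw_fence = foo_ring_emit(fjob);	/* placeholder: start the job on HW */
		if (IS_ERR(hw_fence))
			return hw_fence;

		/* The extra reference taken here belongs to the scheduler, which
		 * drops it in drm_sched_run_job_work() once its completion
		 * callback is installed.
		 */
		return dma_fence_get(hw_fence);
	}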
@@ -1227,43 +1260,27 @@ static void drm_sched_run_job_work(struct work_struct *w)
* drm_sched_init - Init a gpu scheduler instance
*
* @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- * allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- * used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
*
* Return 0 on success, otherwise error code.
*/
-int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
{
- int i, ret;
-
- sched->ops = ops;
- sched->credit_limit = credit_limit;
- sched->name = name;
- sched->timeout = timeout;
- sched->timeout_wq = timeout_wq ? : system_wq;
- sched->hang_limit = hang_limit;
- sched->score = score ? score : &sched->_score;
- sched->dev = dev;
-
- if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+ int i;
+
+ sched->ops = args->ops;
+ sched->credit_limit = args->credit_limit;
+ sched->name = args->name;
+ sched->timeout = args->timeout;
+ sched->hang_limit = args->hang_limit;
+ sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+ sched->score = args->score ? args->score : &sched->_score;
+ sched->dev = args->dev;
+
+ if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
/* This is a gross violation--tell drivers what the problem is.
*/
- drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+ dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
__func__);
return -EINVAL;
} else if (sched->sched_rq) {
@@ -1271,26 +1288,32 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
* fine-tune their DRM calling order, and return all
* is good.
*/
- drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+ dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
return 0;
}
- if (submit_wq) {
- sched->submit_wq = submit_wq;
+ if (args->submit_wq) {
+ sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
} else {
- sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#ifdef CONFIG_LOCKDEP
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
+ WQ_MEM_RECLAIM,
+ &drm_sched_lockdep_map);
+#else
+ sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
+#endif
if (!sched->submit_wq)
return -ENOMEM;
sched->own_submit_wq = true;
}
- ret = -ENOMEM;
- sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+
+ sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
if (!sched->sched_rq)
- goto Out_free;
- sched->num_rqs = num_rqs;
+ goto Out_check_own;
+ sched->num_rqs = args->num_rqs;
for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
@@ -1314,13 +1337,14 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
Out_unroll:
for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
-Out_free:
+
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+Out_check_own:
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
- drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
- return ret;
+ dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+ return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
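A hedged sketch of the new initialization interface, tying together the hypothetical foo_* callbacks sketched earlier. The field names follow the args-> accesses visible in this diff; all values are made up.

	static const struct drm_sched_backend_ops foo_sched_ops = {
		.run_job	= foo_run_job,
		.timedout_job	= foo_timedout_job,
		.free_job	= foo_free_job,
	};

	static int foo_sched_init(struct foo_device *fdev, struct device *dev)
	{
		const struct drm_sched_init_args args = {
			.ops		= &foo_sched_ops,
			.submit_wq	= NULL,	/* let the scheduler allocate an ordered wq */
			.timeout_wq	= NULL,	/* falls back to system_wq */
			.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
			.credit_limit	= 1024,	/* e.g. the ring size, cf. the credit sketch above */
			.hang_limit	= 0,
			.timeout	= msecs_to_jiffies(500),
			.score		= NULL,
			.name		= "foo-ring0",
			.dev		= dev,
		};

		return drm_sched_init(&fdev->sched, &args);
	}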
@@ -1330,6 +1354,20 @@ EXPORT_SYMBOL(drm_sched_init);
* @sched: scheduler instance
*
* Tears down and cleans up the scheduler.
+ *
+ * This stops submission of new jobs to the hardware through
+ * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
+ * will not be called for jobs that are still in drm_gpu_scheduler.pending_list.
+ * There is no solution for this currently. Thus, it is up to the driver to make
+ * sure that:
+ *
+ * a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job()
+ *    has been called for all submitted jobs, or that
+ * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
+ * after drm_sched_fini() ran are freed manually.
+ *
+ * FIXME: Take care of the above problem and prevent this function from leaking
+ * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
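Given the caveat documented above, a hedged sketch of a teardown order that avoids leaking jobs; how a driver waits for its jobs is entirely driver specific, and foo_wait_jobs_freed() is a placeholder.

	static void foo_sched_fini(struct foo_device *fdev)
	{
		/* Driver specific: make sure every submitted job has signalled and
		 * free_job() has run for it before tearing the scheduler down.
		 */
		foo_wait_jobs_freed(fdev);

		drm_sched_fini(&fdev->sched);
	}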
@@ -1345,7 +1383,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
list_for_each_entry(s_entity, &rq->entities, list)
/*
* Prevents reinsertion and marks job_queue as idle,
- * it will removed from rq in drm_sched_entity_fini
+ * it will be removed from the rq in drm_sched_entity_fini()
* eventually
*/
s_entity->stopped = true;
@@ -1425,8 +1463,10 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready);
/**
* drm_sched_wqueue_stop - stop scheduler submission
- *
* @sched: scheduler instance
+ *
+ * Stops the scheduler from pulling new jobs from entities. It also stops
+ * freeing jobs automatically through drm_sched_backend_ops.free_job().
*/
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
{
@@ -1438,8 +1478,12 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop);
/**
* drm_sched_wqueue_start - start scheduler submission
- *
* @sched: scheduler instance
+ *
+ * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
+ *
+ * This function is not necessary for 'conventional' startup. The scheduler is
+ * fully operational after drm_sched_init() succeeded.
*/
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
{
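Finally, a hedged example of how drm_sched_wqueue_stop() and drm_sched_wqueue_start() might be paired, e.g. around a reset driven from outside the scheduler; foo_reset_hw() is a placeholder.

	static void foo_external_reset(struct foo_device *fdev)
	{
		/* No new run_job()/free_job() calls while the hardware is down. */
		drm_sched_wqueue_stop(&fdev->sched);

		foo_reset_hw(fdev);

		drm_sched_wqueue_start(&fdev->sched);
	}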