Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/.kunitconfig            |  12
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile                 |   2
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h    | 111
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c           | 227
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c            |  12
-rw-r--r--  drivers/gpu/drm/scheduler/sched_internal.h         |  91
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c             | 851
-rw-r--r--  drivers/gpu/drm/scheduler/tests/Makefile           |   7
-rw-r--r--  drivers/gpu/drm/scheduler/tests/mock_scheduler.c   | 370
-rw-r--r--  drivers/gpu/drm/scheduler/tests/sched_tests.h      | 224
-rw-r--r--  drivers/gpu/drm/scheduler/tests/tests_basic.c      | 563
11 files changed, 2091 insertions, 379 deletions
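
As an orientation aid before the full diff: the series reworks the scheduler's driver-facing setup, replacing the long drm_sched_init() parameter list with struct drm_sched_init_args and adding a credit count plus the owning client's id to drm_sched_job_init(). The sketch below shows roughly what a driver's calling convention looks like after this change; the my_* names, the credit_limit value and the timeout are hypothetical placeholders, not part of the patch.

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver structures -- not part of this series. */
struct my_ring {
	struct drm_gpu_scheduler sched;
};

struct my_job {
	struct drm_sched_job base;
};

/* Would provide run_job/timedout_job/free_job in a real driver. */
static const struct drm_sched_backend_ops my_sched_ops;

static int my_ring_init(struct my_ring *ring, struct drm_device *drm)
{
	const struct drm_sched_init_args args = {
		.ops		= &my_sched_ops,
		.submit_wq	= NULL,		/* scheduler allocates its own ordered wq */
		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
		.credit_limit	= 64,		/* ring capacity, in job credits */
		.hang_limit	= 0,
		.timeout	= msecs_to_jiffies(500),
		.timeout_wq	= NULL,		/* defaults to the system workqueue */
		.score		= NULL,
		.name		= "my-ring",
		.dev		= drm->dev,
	};

	return drm_sched_init(&ring->sched, &args);
}

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
		     struct drm_file *file)
{
	int ret;

	/* One credit per job, counted against the scheduler's credit_limit. */
	ret = drm_sched_job_init(&job->base, entity, 1, job, file->client_id);
	if (ret)
		return ret;

	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

With credits, flow control is handled by the scheduler itself: a job is only handed to run_job() once the in-flight credit count plus the job's own credits fits within credit_limit, as described in the "Flow Control" documentation added in sched_main.c below.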
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig new file mode 100644 index 000000000000..cece53609fcf --- /dev/null +++ b/drivers/gpu/drm/scheduler/.kunitconfig @@ -0,0 +1,12 @@ +CONFIG_KUNIT=y +CONFIG_DRM=y +CONFIG_DRM_SCHED_KUNIT_TEST=y +CONFIG_EXPERT=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +CONFIG_LOCKDEP=y +CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_LIST=y diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index 53863621829f..6e13e4c63e9d 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -23,3 +23,5 @@ gpu-sched-y := sched_main.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o + +obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index 3143ecaaff86..261713dd7d5a 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _GPU_SCHED_TRACE_H_ #include <linux/stringify.h> @@ -32,81 +32,126 @@ #define TRACE_SYSTEM gpu_scheduler #define TRACE_INCLUDE_FILE gpu_scheduler_trace +/** + * DOC: uAPI trace events + * + * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``, + * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered + * stable uAPI. + * + * Common trace events attributes: + * + * * ``dev`` - the dev_name() of the device running the job. + * + * * ``ring`` - the hardware ring running the job. Together with ``dev`` it + * uniquely identifies where the job is going to be executed. + * + * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of + * &struct drm_sched_fence.finished + * + * All the events depends on drm_sched_job_arm() having been called already for + * the job because they use &struct drm_sched_job.sched or + * &struct drm_sched_job.s_fence. 
+ */ + DECLARE_EVENT_CLASS(drm_sched_job, TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), TP_ARGS(sched_job, entity), TP_STRUCT__entry( - __field(struct drm_sched_entity *, entity) - __field(struct dma_fence *, fence) __string(name, sched_job->sched->name) - __field(uint64_t, id) __field(u32, job_count) __field(int, hw_job_count) + __string(dev, dev_name(sched_job->sched->dev)) + __field(u64, fence_context) + __field(u64, fence_seqno) + __field(u64, client_id) ), TP_fast_assign( - __entry->entity = entity; - __entry->id = sched_job->id; - __entry->fence = &sched_job->s_fence->finished; - __assign_str(name, sched_job->sched->name); + __assign_str(name); __entry->job_count = spsc_queue_count(&entity->job_queue); __entry->hw_job_count = atomic_read( - &sched_job->sched->hw_rq_count); + &sched_job->sched->credit_count); + __assign_str(dev); + __entry->fence_context = sched_job->s_fence->finished.context; + __entry->fence_seqno = sched_job->s_fence->finished.seqno; + __entry->client_id = sched_job->s_fence->drm_client_id; ), - TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->id, - __entry->fence, __get_str(name), - __entry->job_count, __entry->hw_job_count) + TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu", + __get_str(dev), + __entry->fence_context, __entry->fence_seqno, __get_str(name), + __entry->job_count, __entry->hw_job_count, __entry->client_id) ); -DEFINE_EVENT(drm_sched_job, drm_sched_job, +DEFINE_EVENT(drm_sched_job, drm_sched_job_queue, TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), TP_ARGS(sched_job, entity) ); -DEFINE_EVENT(drm_sched_job, drm_run_job, +DEFINE_EVENT(drm_sched_job, drm_sched_job_run, TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), TP_ARGS(sched_job, entity) ); -TRACE_EVENT(drm_sched_process_job, +TRACE_EVENT(drm_sched_job_done, TP_PROTO(struct drm_sched_fence *fence), TP_ARGS(fence), TP_STRUCT__entry( - __field(struct dma_fence *, fence) + __field(u64, fence_context) + __field(u64, fence_seqno) ), TP_fast_assign( - __entry->fence = &fence->finished; + __entry->fence_context = fence->finished.context; + __entry->fence_seqno = fence->finished.seqno; ), - TP_printk("fence=%p signaled", __entry->fence) + TP_printk("fence=%llu:%llu signaled", + __entry->fence_context, __entry->fence_seqno) ); -TRACE_EVENT(drm_sched_job_wait_dep, +TRACE_EVENT(drm_sched_job_add_dep, + TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence), + TP_ARGS(sched_job, fence), + TP_STRUCT__entry( + __field(u64, fence_context) + __field(u64, fence_seqno) + __field(u64, ctx) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->fence_context = sched_job->s_fence->finished.context; + __entry->fence_seqno = sched_job->s_fence->finished.seqno; + __entry->ctx = fence->context; + __entry->seqno = fence->seqno; + ), + TP_printk("fence=%llu:%llu depends on fence=%llu:%llu", + __entry->fence_context, __entry->fence_seqno, + __entry->ctx, __entry->seqno) +); + +TRACE_EVENT(drm_sched_job_unschedulable, TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __string(name, sched_job->sched->name) - __field(uint64_t, id) - __field(struct dma_fence *, fence) - __field(uint64_t, ctx) - __field(unsigned, seqno) + __field(u64, fence_context) + __field(u64, fence_seqno) + __field(u64, ctx) + __field(u64, seqno) ), TP_fast_assign( - __assign_str(name, 
sched_job->sched->name); - __entry->id = sched_job->id; - __entry->fence = fence; + __entry->fence_context = sched_job->s_fence->finished.context; + __entry->fence_seqno = sched_job->s_fence->finished.seqno; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), - TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u", - __get_str(name), __entry->id, - __entry->fence, __entry->ctx, - __entry->seqno) + TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu", + __entry->fence_context, __entry->fence_seqno, + __entry->ctx, __entry->seqno) ); -#endif +#endif /* _GPU_SCHED_TRACE_H_ */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index a42763e1429d..fe174a4857be 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -21,17 +21,16 @@ * */ -#include <linux/kthread.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/completion.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> -#include "gpu_scheduler_trace.h" +#include "sched_internal.h" -#define to_drm_sched_job(sched_job) \ - container_of((sched_job), struct drm_sched_job, queue_node) +#include "gpu_scheduler_trace.h" /** * drm_sched_entity_init - Init a context entity used by scheduler when @@ -51,7 +50,7 @@ * drm_sched_entity_set_priority(). For changing the set of schedulers * @sched_list at runtime see drm_sched_entity_modify_sched(). * - * An entity is cleaned up by callind drm_sched_entity_fini(). See also + * An entity is cleaned up by calling drm_sched_entity_fini(). See also * drm_sched_entity_destroy(). * * Returns 0 on success or a negative error code on failure. @@ -71,19 +70,42 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, entity->guilty = guilty; entity->num_sched_list = num_sched_list; entity->priority = priority; + entity->last_user = current->group_leader; + /* + * It's perfectly valid to initialize an entity without having a valid + * scheduler attached. It's just not valid to use the scheduler before it + * is initialized itself. + */ entity->sched_list = num_sched_list > 1 ? sched_list : NULL; RCU_INIT_POINTER(entity->last_scheduled, NULL); RB_CLEAR_NODE(&entity->rb_tree_node); - if(num_sched_list) - entity->rq = &sched_list[0]->sched_rq[entity->priority]; + if (num_sched_list && !sched_list[0]->sched_rq) { + /* Since every entry covered by num_sched_list + * should be non-NULL and therefore we warn drivers + * not to do this and to fix their DRM calling order. + */ + pr_warn("%s: called with uninitialized scheduler\n", __func__); + } else if (num_sched_list) { + /* The "priority" of an entity cannot exceed the number of run-queues of a + * scheduler. Protect against num_rqs being 0, by converting to signed. Choose + * the lowest priority available. + */ + if (entity->priority >= sched_list[0]->num_rqs) { + dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n", + entity->priority, sched_list[0]->num_rqs); + entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, + (s32) DRM_SCHED_PRIORITY_KERNEL); + } + entity->rq = sched_list[0]->sched_rq[entity->priority]; + } init_completion(&entity->entity_idle); /* We start in an idle state. 
*/ complete_all(&entity->entity_idle); - spin_lock_init(&entity->rq_lock); + spin_lock_init(&entity->lock); spsc_queue_init(&entity->job_queue); atomic_set(&entity->fence_seq, 0); @@ -111,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, { WARN_ON(!num_sched_list || !sched_list); + spin_lock(&entity->lock); entity->sched_list = sched_list; entity->num_sched_list = num_sched_list; + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_modify_sched); @@ -128,18 +152,6 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) return false; } -/* Return true if entity could provide a job. */ -bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - /** * drm_sched_entity_error - return error of last scheduled job * @entity: scheduler entity to check @@ -161,25 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_error); +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb); + static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) { struct drm_sched_job *job = container_of(wrk, typeof(*job), work); - - drm_sched_fence_finished(job->s_fence, -ESRCH); - WARN_ON(job->s_fence->parent); - job->sched->ops->free_job(job); -} - -/* Signal the scheduler finished fence when the entity in question is killed. */ -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); + struct dma_fence *f; unsigned long index; - dma_fence_put(f); - /* Wait for all dependencies to avoid data corruptions */ xa_for_each(&job->dependencies, index, f) { struct drm_sched_fence *s_fence = to_drm_sched_fence(f); @@ -207,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, dma_fence_put(f); } + drm_sched_fence_scheduled(job->s_fence, NULL); + drm_sched_fence_finished(job->s_fence, -ESRCH); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +} + +/* Signal the scheduler finished fence when the entity in question is killed. 
*/ +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + + dma_fence_put(f); + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); schedule_work(&job->work); } @@ -220,10 +237,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) if (!entity->rq) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); /* Make sure this entity is not used by the scheduler at the moment */ wait_for_completion(&entity->entity_idle); @@ -231,13 +248,20 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) /* The entity is guaranteed to not be used by the scheduler */ prev = rcu_dereference_check(entity->last_scheduled, true); dma_fence_get(prev); - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + while ((job = drm_sched_entity_queue_pop(entity))) { struct drm_sched_fence *s_fence = job->s_fence; dma_fence_get(&s_fence->finished); - if (!prev || dma_fence_add_callback(prev, &job->finish_cb, - drm_sched_entity_kill_jobs_cb)) + if (!prev || + dma_fence_add_callback(prev, &job->finish_cb, + drm_sched_entity_kill_jobs_cb)) { + /* + * Adding callback above failed. + * dma_fence_put() checks for NULL. + */ + dma_fence_put(prev); drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + } prev = &s_fence->finished; } @@ -266,9 +290,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) return 0; sched = entity->rq->sched; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL + /* + * The client will not queue more jobs during this fini - consume + * existing queued ones, or discard them on SIGKILL. */ if (current->flags & PF_EXITING) { if (timeout) @@ -281,9 +305,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) drm_sched_entity_is_idle(entity)); } - /* For killed process disable any more IBs enqueue right now */ + /* For a killed process disallow further enqueueing of jobs. */ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); - if ((!last_user || last_user == current->group_leader) && + if (last_user == current->group_leader && (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) drm_sched_entity_kill(entity); @@ -305,9 +329,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush); void drm_sched_entity_fini(struct drm_sched_entity *entity) { /* - * If consumption of existing IBs wasn't completed. Forcefully remove - * them here. Also makes sure that the scheduler won't touch this entity - * any more. + * If consumption of existing jobs wasn't completed forcefully remove + * them. Also makes sure that the scheduler won't touch this entity any + * more. 
*/ drm_sched_entity_kill(entity); @@ -336,20 +360,9 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ -static void drm_sched_entity_clear_dep(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - - entity->dependency = NULL; - dma_fence_put(f); -} - /* - * drm_sched_entity_clear_dep - callback to clear the entities dependency and - * wake up scheduler + * drm_sched_entity_wakeup - callback to clear the entity's dependency and + * wake up the scheduler */ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) @@ -357,8 +370,9 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); - drm_sched_entity_clear_dep(f, cb); - drm_sched_wakeup_if_can_queue(entity->rq->sched); + entity->dependency = NULL; + dma_fence_put(f); + drm_sched_wakeup(entity->rq->sched); } /** @@ -367,14 +381,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, * @entity: scheduler entity * @priority: scheduler priority * - * Update the priority of runqueus used for the entity. + * Update the priority of runqueues used for the entity. */ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority) { - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); entity->priority = priority; - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); } EXPORT_SYMBOL(drm_sched_entity_set_priority); @@ -382,7 +396,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority); * Add a callback to the current dependency of the entity to wake up the * scheduler when the entity becomes available. 
*/ -static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity, + struct drm_sched_job *sched_job) { struct drm_gpu_scheduler *sched = entity->rq->sched; struct dma_fence *fence = entity->dependency; @@ -410,15 +425,12 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(entity->dependency); entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; } + if (trace_drm_sched_job_unschedulable_enabled() && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags)) + trace_drm_sched_job_unschedulable(sched_job, entity->dependency); + if (!dma_fence_add_callback(entity->dependency, &entity->cb, drm_sched_entity_wakeup)) return true; @@ -453,15 +465,13 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) { struct drm_sched_job *sched_job; - sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); + sched_job = drm_sched_entity_queue_peek(entity); if (!sched_job) return NULL; while ((entity->dependency = drm_sched_job_dependency(sched_job, entity))) { - trace_drm_sched_job_wait_dep(sched_job, entity->dependency); - - if (drm_sched_entity_add_dependency_cb(entity)) + if (drm_sched_entity_add_dependency_cb(entity, sched_job)) return NULL; } @@ -489,9 +499,18 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) { struct drm_sched_job *next; - next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); - if (next) - drm_sched_rq_update_fifo(entity, next->submit_ts); + next = drm_sched_entity_queue_peek(entity); + if (next) { + struct drm_sched_rq *rq; + + spin_lock(&entity->lock); + rq = entity->rq; + spin_lock(&rq->lock); + drm_sched_rq_update_fifo_locked(entity, rq, + next->submit_ts); + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); + } } /* Jobs and entities might have different lifecycles. Since we're @@ -518,10 +537,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) return; /* - * Only when the queue is empty are we guaranteed that the scheduler - * thread cannot change ->last_scheduled. To enforce ordering we need - * a read barrier here. See drm_sched_entity_pop_job() for the other - * side. + * Only when the queue is empty are we guaranteed that + * drm_sched_run_job_work() cannot change entity->last_scheduled. To + * enforce ordering we need a read barrier here. See + * drm_sched_entity_pop_job() for the other side. */ smp_rmb(); @@ -531,17 +550,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) if (fence && !dma_fence_is_signaled(fence)) return; - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); - rq = sched ? &sched->sched_rq[entity->priority] : NULL; + rq = sched ? 
sched->sched_rq[entity->priority] : NULL; if (rq != entity->rq) { drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; } - spin_unlock(&entity->rq_lock); if (entity->num_sched_list == 1) entity->sched_list = NULL; + + spin_unlock(&entity->lock); } /** @@ -552,8 +572,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) * fence sequence number this function should be called with drm_sched_job_arm() * under common lock for the struct drm_sched_entity that was set up for * @sched_job in drm_sched_job_init(). - * - * Returns 0 for success, negative error code otherwise. */ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) { @@ -561,7 +579,15 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) bool first; ktime_t submit_ts; - trace_drm_sched_job(sched_job, entity); + trace_drm_sched_job_queue(sched_job, entity); + + if (trace_drm_sched_job_add_dep_enabled()) { + struct dma_fence *entry; + unsigned long index; + + xa_for_each(&sched_job->dependencies, index, entry) + trace_drm_sched_job_add_dep(sched_job, entry); + } atomic_inc(entity->rq->sched->score); WRITE_ONCE(entity->last_user, current->group_leader); @@ -575,22 +601,31 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) /* first job wakes up scheduler */ if (first) { + struct drm_gpu_scheduler *sched; + struct drm_sched_rq *rq; + /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); + spin_lock(&entity->lock); if (entity->stopped) { - spin_unlock(&entity->rq_lock); + spin_unlock(&entity->lock); DRM_ERROR("Trying to push to a killed entity\n"); return; } - drm_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); + rq = entity->rq; + sched = rq->sched; + + spin_lock(&rq->lock); + drm_sched_rq_add_entity(rq, entity); if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_update_fifo(entity, submit_ts); + drm_sched_rq_update_fifo_locked(entity, rq, submit_ts); + + spin_unlock(&rq->lock); + spin_unlock(&entity->lock); - drm_sched_wakeup_if_can_queue(entity->rq->sched); + drm_sched_wakeup(sched); } } EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 06cedfe4b486..9391d6f0dc01 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -21,7 +21,7 @@ * */ -#include <linux/kthread.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> @@ -29,13 +29,13 @@ #include <drm/gpu_scheduler.h> +#include "sched_internal.h" + static struct kmem_cache *sched_fence_slab; static int __init drm_sched_fence_slab_init(void) { - sched_fence_slab = kmem_cache_create( - "drm_sched_fence", sizeof(struct drm_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); + sched_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN); if (!sched_fence_slab) return -ENOMEM; @@ -206,7 +206,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) EXPORT_SYMBOL(to_drm_sched_fence); struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity, - void *owner) + void *owner, + u64 drm_client_id) { struct drm_sched_fence *fence = NULL; @@ -215,6 +216,7 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity, return NULL; fence->owner = owner; + fence->drm_client_id = drm_client_id; spin_lock_init(&fence->lock); return fence; diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h new file mode 
100644 index 000000000000..7ea5a6736f98 --- /dev/null +++ b/drivers/gpu/drm/scheduler/sched_internal.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_ +#define _DRM_GPU_SCHEDULER_INTERNAL_H_ + + +/* Used to choose between FIFO and RR job-scheduling */ +extern int drm_sched_policy; + +#define DRM_SCHED_POLICY_RR 0 +#define DRM_SCHED_POLICY_FIFO 1 + +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); + +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); + +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, ktime_t ts); + +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); + +struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity, + void *owner, u64 drm_client_id); +void drm_sched_fence_init(struct drm_sched_fence *fence, + struct drm_sched_entity *entity); +void drm_sched_fence_free(struct drm_sched_fence *fence); + +void drm_sched_fence_scheduled(struct drm_sched_fence *fence, + struct dma_fence *parent); +void drm_sched_fence_finished(struct drm_sched_fence *fence, int result); + +/** + * drm_sched_entity_queue_pop - Low level helper for popping queued jobs + * + * @entity: scheduler entity + * + * Low level helper for popping queued jobs. + * + * Returns: The job dequeued or NULL. + */ +static inline struct drm_sched_job * +drm_sched_entity_queue_pop(struct drm_sched_entity *entity) +{ + struct spsc_node *node; + + node = spsc_queue_pop(&entity->job_queue); + if (!node) + return NULL; + + return container_of(node, struct drm_sched_job, queue_node); +} + +/** + * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue + * + * @entity: scheduler entity + * + * Low level helper for peeking at the job queue + * + * Returns: The job at the head of the queue or NULL. + */ +static inline struct drm_sched_job * +drm_sched_entity_queue_peek(struct drm_sched_entity *entity) +{ + struct spsc_node *node; + + node = spsc_queue_peek(&entity->job_queue); + if (!node) + return NULL; + + return container_of(node, struct drm_sched_job, queue_node); +} + +/* Return true if entity could provide a job. */ +static inline bool +drm_sched_entity_is_ready(struct drm_sched_entity *entity) +{ + if (!spsc_queue_count(&entity->job_queue)) + return false; + + if (READ_ONCE(entity->dependency)) + return false; + + return true; +} + +#endif diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 506371c42745..1d4f1b822e7b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -41,14 +41,32 @@ * 4. Entities themselves maintain a queue of jobs that will be scheduled on * the hardware. * - * The jobs in a entity are always scheduled in the order that they were pushed. + * The jobs in an entity are always scheduled in the order in which they were pushed. * * Note that once a job was taken from the entities queue and pushed to the * hardware, i.e. the pending queue, the entity must not be referenced anymore * through the jobs entity pointer. */ -#include <linux/kthread.h> +/** + * DOC: Flow Control + * + * The DRM GPU scheduler provides a flow control mechanism to regulate the rate + * in which the jobs fetched from scheduler entities are executed. 
+ * + * In this context the &drm_gpu_scheduler keeps track of a driver specified + * credit limit representing the capacity of this scheduler and a credit count; + * every &drm_sched_job carries a driver specified number of credits. + * + * Once a job is executed (but not yet finished), the job's credits contribute + * to the scheduler's credit count until the job is finished. If by executing + * one more job the scheduler's credit count would exceed the scheduler's + * credit limit, the job won't be executed. Instead, the scheduler will wait + * until the credit count has decreased enough to not overflow its credit limit. + * This implies waiting for previously executed jobs. + */ + +#include <linux/export.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/completion.h> @@ -61,12 +79,11 @@ #include <drm/gpu_scheduler.h> #include <drm/spsc_queue.h> +#include "sched_internal.h" + #define CREATE_TRACE_POINTS #include "gpu_scheduler_trace.h" -#define to_drm_sched_job(sched_job) \ - container_of((sched_job), struct drm_sched_job, queue_node) - int drm_sched_policy = DRM_SCHED_POLICY_FIFO; /** @@ -76,6 +93,46 @@ int drm_sched_policy = DRM_SCHED_POLICY_FIFO; MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default)."); module_param_named(sched_policy, drm_sched_policy, int, 0444); +static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched) +{ + u32 credits; + + WARN_ON(check_sub_overflow(sched->credit_limit, + atomic_read(&sched->credit_count), + &credits)); + + return credits; +} + +/** + * drm_sched_can_queue -- Can we queue more to the hardware? + * @sched: scheduler instance + * @entity: the scheduler entity + * + * Return true if we can push at least one more job from @entity, false + * otherwise. + */ +static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity) +{ + struct drm_sched_job *s_job; + + s_job = drm_sched_entity_queue_peek(entity); + if (!s_job) + return false; + + /* If a job exceeds the credit limit, truncate it to the credit limit + * itself to guarantee forward progress. + */ + if (s_job->credits > sched->credit_limit) { + dev_WARN(sched->dev, + "Jobs may not exceed the credit limit, truncate.\n"); + s_job->credits = sched->credit_limit; + } + + return drm_sched_available_credits(sched) >= s_job->credits; +} + static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b) { @@ -85,35 +142,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting); } -static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity) +static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) { - struct drm_sched_rq *rq = entity->rq; - if (!RB_EMPTY_NODE(&entity->rb_tree_node)) { rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root); RB_CLEAR_NODE(&entity->rb_tree_node); } } -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts) +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + ktime_t ts) { /* * Both locks need to be grabbed, one to protect from entity->rq change * for entity from within concurrent drm_sched_entity_select_rq and the * other to update the rb tree structure. 
*/ - spin_lock(&entity->rq_lock); - spin_lock(&entity->rq->lock); + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); entity->oldest_job_waiting = ts; - rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root, + rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root, drm_sched_entity_compare_before); - - spin_unlock(&entity->rq->lock); - spin_unlock(&entity->rq_lock); } /** @@ -145,15 +200,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, void drm_sched_rq_add_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + lockdep_assert_held(&rq->lock); + if (!list_empty(&entity->list)) return; - spin_lock(&rq->lock); - atomic_inc(rq->sched->score); list_add_tail(&entity->list, &rq->entities); - - spin_unlock(&rq->lock); } /** @@ -167,6 +221,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity) { + lockdep_assert_held(&entity->lock); + if (list_empty(&entity->list)) return; @@ -179,7 +235,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, rq->current_entity = NULL; if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) - drm_sched_rq_remove_fifo_locked(entity); + drm_sched_rq_remove_fifo_locked(entity, rq); spin_unlock(&rq->lock); } @@ -187,12 +243,18 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, /** * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run * + * @sched: the gpu scheduler * @rq: scheduler run queue to check. * - * Try to find a ready entity, returns NULL if none found. + * Try to find the next ready entity. + * + * Return an entity if one is found; return an error-pointer (!NULL) if an + * entity was ready, but the scheduler had insufficient credits to accommodate + * its job; return NULL, if no ready entity was found. */ static struct drm_sched_entity * -drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) +drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, + struct drm_sched_rq *rq) { struct drm_sched_entity *entity; @@ -201,23 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - if (drm_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - reinit_completion(&entity->entity_idle); - spin_unlock(&rq->lock); - return entity; - } + if (drm_sched_entity_is_ready(entity)) + goto found; } } list_for_each_entry(entity, &rq->entities, list) { - - if (drm_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - reinit_completion(&entity->entity_idle); - spin_unlock(&rq->lock); - return entity; - } + if (drm_sched_entity_is_ready(entity)) + goto found; if (entity == rq->current_entity) break; @@ -226,17 +279,39 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) spin_unlock(&rq->lock); return NULL; + +found: + if (!drm_sched_can_queue(sched, entity)) { + /* + * If scheduler cannot take more jobs signal the caller to not + * consider lower priority queues. + */ + entity = ERR_PTR(-ENOSPC); + } else { + rq->current_entity = entity; + reinit_completion(&entity->entity_idle); + } + + spin_unlock(&rq->lock); + + return entity; } /** * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run * + * @sched: the gpu scheduler * @rq: scheduler run queue to check. 
* - * Find oldest waiting ready entity, returns NULL if none found. + * Find oldest waiting ready entity. + * + * Return an entity if one is found; return an error-pointer (!NULL) if an + * entity was ready, but the scheduler had insufficient credits to accommodate + * its job; return NULL, if no ready entity was found. */ static struct drm_sched_entity * -drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) +drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, + struct drm_sched_rq *rq) { struct rb_node *rb; @@ -246,7 +321,14 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node); if (drm_sched_entity_is_ready(entity)) { - rq->current_entity = entity; + /* If we can't queue yet, preserve the current entity in + * terms of fairness. + */ + if (!drm_sched_can_queue(sched, entity)) { + spin_unlock(&rq->lock); + return ERR_PTR(-ENOSPC); + } + reinit_completion(&entity->entity_idle); break; } @@ -257,25 +339,45 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) } /** + * drm_sched_run_job_queue - enqueue run-job work + * @sched: scheduler instance + */ +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) +{ + if (!READ_ONCE(sched->pause_submit)) + queue_work(sched->submit_wq, &sched->work_run_job); +} + +/** + * drm_sched_run_free_queue - enqueue free-job work + * @sched: scheduler instance + */ +static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) +{ + if (!READ_ONCE(sched->pause_submit)) + queue_work(sched->submit_wq, &sched->work_free_job); +} + +/** * drm_sched_job_done - complete a job * @s_job: pointer to the job which is done * - * Finish the job's fence and wake up the worker thread. + * Finish the job's fence and resubmit the work items. */ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) { struct drm_sched_fence *s_fence = s_job->s_fence; struct drm_gpu_scheduler *sched = s_fence->sched; - atomic_dec(&sched->hw_rq_count); + atomic_sub(s_job->credits, &sched->credit_count); atomic_dec(sched->score); - trace_drm_sched_process_job(s_fence); + trace_drm_sched_job_done(s_fence); dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence, result); dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); + drm_sched_run_free_queue(sched); } /** @@ -299,12 +401,37 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb) */ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) { + lockdep_assert_held(&sched->job_list_lock); + if (sched->timeout != MAX_SCHEDULE_TIMEOUT && !list_empty(&sched->pending_list)) - queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); + mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); +} + +static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched) +{ + spin_lock(&sched->job_list_lock); + drm_sched_start_timeout(sched); + spin_unlock(&sched->job_list_lock); } /** + * drm_sched_tdr_queue_imm: - immediately start job timeout handler + * + * @sched: scheduler for which the timeout handling should be started. + * + * Start timeout handling immediately for the named scheduler. 
+ */ +void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched) +{ + spin_lock(&sched->job_list_lock); + sched->timeout = 0; + drm_sched_start_timeout(sched); + spin_unlock(&sched->job_list_lock); +} +EXPORT_SYMBOL(drm_sched_tdr_queue_imm); + +/** * drm_sched_fault - immediately start timeout handler * * @sched: scheduler where the timeout handling should be started. @@ -380,24 +507,51 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job) spin_unlock(&sched->job_list_lock); } +/** + * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout + * @sched: scheduler instance + * @job: job to be reinserted on the pending list + * + * In the case of a "false timeout" - when a timeout occurs but the GPU isn't + * hung and is making progress, the scheduler must reinsert the job back into + * @sched->pending_list. Otherwise, the job and its resources won't be freed + * through the &struct drm_sched_backend_ops.free_job callback. + * + * This function must be used in "false timeout" cases only. + */ +static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job) +{ + spin_lock(&sched->job_list_lock); + list_add(&job->list, &sched->pending_list); + + /* After reinserting the job, the scheduler enqueues the free-job work + * again if ready. Otherwise, a signaled job could be added to the + * pending list, but never freed. + */ + drm_sched_run_free_queue(sched); + spin_unlock(&sched->job_list_lock); +} + static void drm_sched_job_timedout(struct work_struct *work) { struct drm_gpu_scheduler *sched; struct drm_sched_job *job; - enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL; + enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); - /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ + /* Protects against concurrent deletion in drm_sched_get_finished_job */ spin_lock(&sched->job_list_lock); job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); if (job) { /* - * Remove the bad job so it cannot be freed by concurrent - * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread - * is parked at which point it's safe. + * Remove the bad job so it cannot be freed by a concurrent + * &struct drm_sched_backend_ops.free_job. It will be + * reinserted after the scheduler's work items have been + * cancelled, at which point it's safe. */ list_del_init(&job->list); spin_unlock(&sched->job_list_lock); @@ -412,15 +566,15 @@ static void drm_sched_job_timedout(struct work_struct *work) job->sched->ops->free_job(job); sched->free_guilty = false; } + + if (status == DRM_GPU_SCHED_STAT_NO_HANG) + drm_sched_job_reinsert_on_false_timeout(sched, job); } else { spin_unlock(&sched->job_list_lock); } - if (status != DRM_GPU_SCHED_STAT_ENODEV) { - spin_lock(&sched->job_list_lock); - drm_sched_start_timeout(sched); - spin_unlock(&sched->job_list_lock); - } + if (status != DRM_GPU_SCHED_STAT_ENODEV) + drm_sched_start_timeout_unlocked(sched); } /** @@ -434,19 +588,26 @@ static void drm_sched_job_timedout(struct work_struct *work) * callers responsibility to release it manually if it's not part of the * pending list any more. * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler teardown, i.e., before calling drm_sched_fini(). 
+ * + * As it's only used for reset recovery, drivers must not call this function + * in their &struct drm_sched_backend_ops.timedout_job callback when they + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { struct drm_sched_job *s_job, *tmp; - kthread_park(sched->thread); + drm_sched_wqueue_stop(sched); /* * Reinsert back the bad job here - now it's safe as - * drm_sched_get_cleanup_job cannot race against us and release the + * drm_sched_get_finished_job() cannot race against us and release the * bad job at this point - we parked (waited for) any in progress - * (earlier) cleanups and drm_sched_get_cleanup_job will not be called - * now until the scheduler thread is unparked. + * (earlier) cleanups and drm_sched_get_finished_job() will not be + * called now until the scheduler's work items are submitted again. */ if (bad && bad->sched == sched) /* @@ -459,7 +620,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) * Iterate the job list from later to earlier one and either deactive * their HW callbacks or remove them from pending list if they already * signaled. - * This iteration is thread safe as sched thread is stopped. + * This iteration is thread safe as the scheduler's work items have been + * cancelled. */ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, list) { @@ -468,7 +630,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) &s_job->cb)) { dma_fence_put(s_job->s_fence->parent); s_job->s_fence->parent = NULL; - atomic_dec(&sched->hw_rq_count); + atomic_sub(s_job->credits, &sched->credit_count); } else { /* * remove job from pending_list. @@ -506,53 +668,49 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) */ cancel_delayed_work(&sched->work_tdr); } - EXPORT_SYMBOL(drm_sched_stop); /** * drm_sched_start - recover jobs after a reset * * @sched: scheduler instance - * @full_recovery: proceed with complete sched restart + * @errno: error to set on the pending fences + * + * This function is typically used for reset recovery (see the docu of + * drm_sched_backend_ops.timedout_job() for details). Do not call it for + * scheduler startup. The scheduler itself is fully operational after + * drm_sched_init() succeeded. * + * As it's only used for reset recovery, drivers must not call this function + * in their &struct drm_sched_backend_ops.timedout_job callback when they + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. */ -void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { struct drm_sched_job *s_job, *tmp; - int r; /* - * Locking the list is not required here as the sched thread is parked - * so no new jobs are being inserted or removed. Also concurrent - * GPU recovers can't run in parallel. + * Locking the list is not required here as the scheduler's work items + * are currently not running, so no new jobs are being inserted or + * removed. Also concurrent GPU recovers can't run in parallel. 
*/ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { struct dma_fence *fence = s_job->s_fence->parent; - atomic_inc(&sched->hw_rq_count); + atomic_add(s_job->credits, &sched->credit_count); - if (!full_recovery) + if (!fence) { + drm_sched_job_done(s_job, errno ?: -ECANCELED); continue; + } - if (fence) { - r = dma_fence_add_callback(fence, &s_job->cb, - drm_sched_job_done_cb); - if (r == -ENOENT) - drm_sched_job_done(s_job, fence->error); - else if (r) - DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", - r); - } else - drm_sched_job_done(s_job, -ECANCELED); - } - - if (full_recovery) { - spin_lock(&sched->job_list_lock); - drm_sched_start_timeout(sched); - spin_unlock(&sched->job_list_lock); + if (dma_fence_add_callback(fence, &s_job->cb, + drm_sched_job_done_cb)) + drm_sched_job_done(s_job, fence->error ?: errno); } - kthread_unpark(sched->thread); + drm_sched_start_timeout_unlocked(sched); + drm_sched_wqueue_start(sched); } EXPORT_SYMBOL(drm_sched_start); @@ -613,7 +771,11 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * drm_sched_job_init - init a scheduler job * @job: scheduler job to init * @entity: scheduler entity to use + * @credits: the number of credits this job contributes to the schedulers + * credit limit * @owner: job owner for debugging + * @drm_client_id: &struct drm_file.client_id of the owner (used by trace + * events) * * Refer to drm_sched_entity_push_job() documentation * for locking considerations. @@ -621,6 +783,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * Drivers must make sure drm_sched_job_cleanup() if this function returns * successfully, even when @job is aborted before drm_sched_job_arm() is called. * + * Note that this function does not assign a valid value to each struct member + * of struct drm_sched_job. Take a look at that struct's documentation to see + * who sets which struct member with what lifetime. + * * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware * has died, which can mean that there's no valid runqueue for a @entity. * This function returns -ENOENT in this case (which probably should be -EIO as @@ -630,13 +796,34 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); */ int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, - void *owner) + u32 credits, void *owner, + uint64_t drm_client_id) { - if (!entity->rq) + if (!entity->rq) { + /* This will most likely be followed by missing frames + * or worse--a blank screen--leave a trail in the + * logs, so this can be debugged easier. + */ + dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); return -ENOENT; + } + + if (unlikely(!credits)) { + pr_err("*ERROR* %s: credits cannot be 0!\n", __func__); + return -EINVAL; + } + + /* + * We don't know for sure how the user has allocated. Thus, zero the + * struct so that unallowed (i.e., too early) usage of pointers that + * this function does not set is guaranteed to lead to a NULL pointer + * exception instead of UB. + */ + memset(job, 0, sizeof(*job)); job->entity = entity; - job->s_fence = drm_sched_fence_alloc(entity, owner); + job->credits = credits; + job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id); if (!job->s_fence) return -ENOMEM; @@ -654,11 +841,15 @@ EXPORT_SYMBOL(drm_sched_job_init); * * This arms a scheduler job for execution. Specifically it initializes the * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv - * or other places that need to track the completion of this job. 
+ * or other places that need to track the completion of this job. It also + * initializes sequence numbers, which are fundamental for fence ordering. * * Refer to drm_sched_entity_push_job() documentation for locking * considerations. * + * Once this function was called, you *must* submit @job with + * drm_sched_entity_push_job(). + * * This can only be called if drm_sched_job_init() succeeded. */ void drm_sched_job_arm(struct drm_sched_job *job) @@ -671,8 +862,7 @@ void drm_sched_job_arm(struct drm_sched_job *job) sched = entity->rq->sched; job->sched = sched; - job->s_priority = entity->rq - sched->sched_rq; - job->id = atomic64_inc_return(&sched->job_id_count); + job->s_priority = entity->priority; drm_sched_fence_init(job->s_fence, job->entity); } @@ -775,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, dma_resv_assert_held(resv); dma_resv_for_each_fence(&cursor, resv, usage, fence) { - /* Make sure to grab an additional ref on the added fence */ - dma_fence_get(fence); - ret = drm_sched_job_add_dependency(job, fence); - if (ret) { - dma_fence_put(fence); + /* + * As drm_sched_job_add_dependency always consumes the fence + * reference (even when it fails), and dma_resv_for_each_fence + * is not obtaining one, we need to grab one before calling. + */ + ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); + if (ret) return ret; - } } return 0; } @@ -812,6 +1003,29 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); /** + * drm_sched_job_has_dependency - check whether fence is the job's dependency + * @job: scheduler job to check + * @fence: fence to look for + * + * Returns: + * True if @fence is found within the job's dependencies, or otherwise false. + */ +bool drm_sched_job_has_dependency(struct drm_sched_job *job, + struct dma_fence *fence) +{ + struct dma_fence *f; + unsigned long index; + + xa_for_each(&job->dependencies, index, f) { + if (f == fence) + return true; + } + + return false; +} +EXPORT_SYMBOL(drm_sched_job_has_dependency); + +/** * drm_sched_job_cleanup - clean up scheduler job resources * @job: scheduler job to clean up * @@ -820,9 +1034,12 @@ EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); * Drivers should call this from their error unwind code if @job is aborted * before drm_sched_job_arm() is called. * - * After that point of no return @job is committed to be executed by the - * scheduler, and this function should be called from the - * &drm_sched_backend_ops.free_job callback. + * drm_sched_job_arm() is a point of no return since it initializes the fences + * and their sequence number etc. Once that function has been called, you *must* + * submit it with drm_sched_entity_push_job() and cannot simply abort it by + * calling drm_sched_job_cleanup(). + * + * This function should be called in the &drm_sched_backend_ops.free_job callback. */ void drm_sched_job_cleanup(struct drm_sched_job *job) { @@ -830,10 +1047,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) unsigned long index; if (kref_read(&job->s_fence->finished.refcount)) { - /* drm_sched_job_arm() has been called */ + /* The job has been processed by the scheduler, i.e., + * drm_sched_job_arm() and drm_sched_entity_push_job() have + * been called. + */ dma_fence_put(&job->s_fence->finished); } else { - /* aborted job before committing to run it */ + /* The job was aborted before it has been committed to be run; + * notably, drm_sched_job_arm() has not been called. 
+ */ drm_sched_fence_free(job->s_fence); } @@ -848,27 +1070,14 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) EXPORT_SYMBOL(drm_sched_job_cleanup); /** - * drm_sched_can_queue -- Can we queue more to the hardware? - * @sched: scheduler instance - * - * Return true if we can push more jobs to the hw, otherwise false. - */ -static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) -{ - return atomic_read(&sched->hw_rq_count) < - sched->hw_submission_limit; -} - -/** - * drm_sched_wakeup_if_can_queue - Wake up the scheduler + * drm_sched_wakeup - Wake up the scheduler if it is ready to queue * @sched: scheduler instance * * Wake up the scheduler if we can queue jobs. */ -void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { - if (drm_sched_can_queue(sched)) - wake_up_interruptible(&sched->wake_up_worker); + drm_sched_run_job_queue(sched); } /** @@ -876,7 +1085,11 @@ void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) * * @sched: scheduler instance * - * Returns the entity to process or NULL if none are found. + * Return an entity to process or NULL if none are found. + * + * Note, that we break out of the for-loop when "entity" is non-null, which can + * also be an error-pointer--this assures we don't process lower priority + * run-queues. See comments in the respectively called functions. */ static struct drm_sched_entity * drm_sched_select_entity(struct drm_gpu_scheduler *sched) @@ -884,31 +1097,33 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) struct drm_sched_entity *entity; int i; - if (!drm_sched_can_queue(sched)) - return NULL; - - /* Kernel run queue has higher priority than normal run queue*/ - for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + /* Start with the highest priority. + */ + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? - drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) : - drm_sched_rq_select_entity_rr(&sched->sched_rq[i]); + drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : + drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); if (entity) break; } - return entity; + return IS_ERR(entity) ? NULL : entity; } /** - * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed + * drm_sched_get_finished_job - fetch the next finished job to be destroyed * * @sched: scheduler instance + * @have_more: are there more finished jobs on the list + * + * Informs the caller through @have_more whether there are more finished jobs + * besides the returned one. * * Returns the next finished job from the pending list (if there is one) * ready for it to be destroyed. 
*/ static struct drm_sched_job * -drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more) { struct drm_sched_job *job, *next; @@ -916,20 +1131,25 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); - if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from pending_list */ list_del_init(&job->list); /* cancel this job's TO timer */ cancel_delayed_work(&sched->work_tdr); - /* make the scheduled timestamp more accurate */ + + *have_more = false; next = list_first_entry_or_null(&sched->pending_list, typeof(*next), list); - if (next) { - next->s_fence->scheduled.timestamp = - job->s_fence->finished.timestamp; + /* make the scheduled timestamp more accurate */ + if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, + &next->s_fence->scheduled.flags)) + next->s_fence->scheduled.timestamp = + dma_fence_timestamp(&job->s_fence->finished); + + *have_more = dma_fence_is_signaled(&next->s_fence->finished); + /* start TO timer for next job */ drm_sched_start_timeout(sched); } @@ -979,178 +1199,264 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, EXPORT_SYMBOL(drm_sched_pick_best); /** - * drm_sched_blocked - check if the scheduler is blocked - * - * @sched: scheduler instance + * drm_sched_free_job_work - worker to call free_job * - * Returns true if blocked, otherwise false. + * @w: free job work */ -static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) +static void drm_sched_free_job_work(struct work_struct *w) { - if (kthread_should_park()) { - kthread_parkme(); - return true; + struct drm_gpu_scheduler *sched = + container_of(w, struct drm_gpu_scheduler, work_free_job); + struct drm_sched_job *job; + bool have_more; + + job = drm_sched_get_finished_job(sched, &have_more); + if (job) { + sched->ops->free_job(job); + if (have_more) + drm_sched_run_free_queue(sched); } - return false; + drm_sched_run_job_queue(sched); } /** - * drm_sched_main - main scheduler thread + * drm_sched_run_job_work - worker to call run_job * - * @param: scheduler instance - * - * Returns 0. + * @w: run job work */ -static int drm_sched_main(void *param) +static void drm_sched_run_job_work(struct work_struct *w) { - struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; + struct drm_gpu_scheduler *sched = + container_of(w, struct drm_gpu_scheduler, work_run_job); + struct drm_sched_entity *entity; + struct dma_fence *fence; + struct drm_sched_fence *s_fence; + struct drm_sched_job *sched_job; int r; - sched_set_fifo_low(current); - - while (!kthread_should_stop()) { - struct drm_sched_entity *entity = NULL; - struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job; - struct dma_fence *fence; - struct drm_sched_job *cleanup_job = NULL; - - wait_event_interruptible(sched->wake_up_worker, - (cleanup_job = drm_sched_get_cleanup_job(sched)) || - (!drm_sched_blocked(sched) && - (entity = drm_sched_select_entity(sched))) || - kthread_should_stop()); - - if (cleanup_job) - sched->ops->free_job(cleanup_job); + /* Find entity with a ready job */ + entity = drm_sched_select_entity(sched); + if (!entity) { + /* + * Either no more work to do, or the next ready job needs more + * credits than the scheduler has currently available. 
+ */ + return; + } - if (!entity) - continue; + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) { + complete_all(&entity->entity_idle); + drm_sched_run_job_queue(sched); + return; + } - sched_job = drm_sched_entity_pop_job(entity); + s_fence = sched_job->s_fence; - if (!sched_job) { - complete_all(&entity->entity_idle); - continue; - } + atomic_add(sched_job->credits, &sched->credit_count); + drm_sched_job_begin(sched_job); - s_fence = sched_job->s_fence; + trace_drm_sched_job_run(sched_job, entity); + /* + * The run_job() callback must by definition return a fence whose + * refcount has been incremented for the scheduler already. + */ + fence = sched->ops->run_job(sched_job); + complete_all(&entity->entity_idle); + drm_sched_fence_scheduled(s_fence, fence); + + if (!IS_ERR_OR_NULL(fence)) { + r = dma_fence_add_callback(fence, &sched_job->cb, + drm_sched_job_done_cb); + if (r == -ENOENT) + drm_sched_job_done(sched_job, fence->error); + else if (r) + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); - atomic_inc(&sched->hw_rq_count); - drm_sched_job_begin(sched_job); + dma_fence_put(fence); + } else { + drm_sched_job_done(sched_job, IS_ERR(fence) ? + PTR_ERR(fence) : 0); + } - trace_drm_run_job(sched_job, entity); - fence = sched->ops->run_job(sched_job); - complete_all(&entity->entity_idle); - drm_sched_fence_scheduled(s_fence, fence); + wake_up(&sched->job_scheduled); + drm_sched_run_job_queue(sched); +} - if (!IS_ERR_OR_NULL(fence)) { - /* Drop for original kref_init of the fence */ - dma_fence_put(fence); +static struct workqueue_struct *drm_sched_alloc_wq(const char *name) +{ +#if (IS_ENABLED(CONFIG_LOCKDEP)) + static struct lockdep_map map = { + .name = "drm_sched_lockdep_map" + }; - r = dma_fence_add_callback(fence, &sched_job->cb, - drm_sched_job_done_cb); - if (r == -ENOENT) - drm_sched_job_done(sched_job, fence->error); - else if (r) - DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", - r); - } else { - drm_sched_job_done(sched_job, IS_ERR(fence) ? - PTR_ERR(fence) : 0); - } + /* + * Avoid leaking a lockdep map on each drm sched creation and + * destruction by using a single lockdep map for all drm sched + * allocated submit_wq. + */ - wake_up(&sched->job_scheduled); - } - return 0; + return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map); +#else + return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); +#endif } /** * drm_sched_init - Init a gpu scheduler instance * * @sched: scheduler instance - * @ops: backend operations for this scheduler - * @hw_submission: number of hw submissions that can be in flight - * @hang_limit: number of times to allow a job to hang before dropping it - * @timeout: timeout value in jiffies for the scheduler - * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is - * used - * @score: optional score atomic shared with other schedulers - * @name: name used for debugging - * @dev: target &struct device + * @args: scheduler initialization arguments * * Return 0 on success, otherwise error code. 
*/ -int drm_sched_init(struct drm_gpu_scheduler *sched, - const struct drm_sched_backend_ops *ops, - unsigned hw_submission, unsigned hang_limit, - long timeout, struct workqueue_struct *timeout_wq, - atomic_t *score, const char *name, struct device *dev) +int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args) { - int i, ret; - sched->ops = ops; - sched->hw_submission_limit = hw_submission; - sched->name = name; - sched->timeout = timeout; - sched->timeout_wq = timeout_wq ? : system_wq; - sched->hang_limit = hang_limit; - sched->score = score ? score : &sched->_score; - sched->dev = dev; - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) - drm_sched_rq_init(sched, &sched->sched_rq[i]); - - init_waitqueue_head(&sched->wake_up_worker); + int i; + + sched->ops = args->ops; + sched->credit_limit = args->credit_limit; + sched->name = args->name; + sched->timeout = args->timeout; + sched->hang_limit = args->hang_limit; + sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq; + sched->score = args->score ? args->score : &sched->_score; + sched->dev = args->dev; + + if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) { + /* This is a gross violation--tell drivers what the problem is. + */ + dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", + __func__); + return -EINVAL; + } else if (sched->sched_rq) { + /* Not an error, but warn anyway so drivers can + * fine-tune their DRM calling order, and return all + * is good. + */ + dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__); + return 0; + } + + if (args->submit_wq) { + sched->submit_wq = args->submit_wq; + sched->own_submit_wq = false; + } else { + sched->submit_wq = drm_sched_alloc_wq(args->name); + if (!sched->submit_wq) + return -ENOMEM; + + sched->own_submit_wq = true; + } + + sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq), + GFP_KERNEL | __GFP_ZERO); + if (!sched->sched_rq) + goto Out_check_own; + sched->num_rqs = args->num_rqs; + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { + sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); + if (!sched->sched_rq[i]) + goto Out_unroll; + drm_sched_rq_init(sched, sched->sched_rq[i]); + } + init_waitqueue_head(&sched->job_scheduled); INIT_LIST_HEAD(&sched->pending_list); spin_lock_init(&sched->job_list_lock); - atomic_set(&sched->hw_rq_count, 0); + atomic_set(&sched->credit_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); + INIT_WORK(&sched->work_run_job, drm_sched_run_job_work); + INIT_WORK(&sched->work_free_job, drm_sched_free_job_work); atomic_set(&sched->_score, 0); atomic64_set(&sched->job_id_count, 0); - - /* Each scheduler will run on a seperate kernel thread */ - sched->thread = kthread_run(drm_sched_main, sched, sched->name); - if (IS_ERR(sched->thread)) { - ret = PTR_ERR(sched->thread); - sched->thread = NULL; - DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name); - return ret; - } + sched->pause_submit = false; sched->ready = true; return 0; +Out_unroll: + for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--) + kfree(sched->sched_rq[i]); + + kfree(sched->sched_rq); + sched->sched_rq = NULL; +Out_check_own: + if (sched->own_submit_wq) + destroy_workqueue(sched->submit_wq); + dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); + return -ENOMEM; } EXPORT_SYMBOL(drm_sched_init); +static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler 
*sched) +{ + struct drm_sched_job *job, *tmp; + + /* All other accessors are stopped. No locking necessary. */ + list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) { + sched->ops->cancel_job(job); + list_del(&job->list); + sched->ops->free_job(job); + } +} + /** * drm_sched_fini - Destroy a gpu scheduler * * @sched: scheduler instance * * Tears down and cleans up the scheduler. + * + * This stops submission of new jobs to the hardware through &struct + * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job + * is implemented, all jobs will be canceled through it and afterwards cleaned + * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not + * implemented, memory could leak. */ void drm_sched_fini(struct drm_gpu_scheduler *sched) { struct drm_sched_entity *s_entity; int i; - if (sched->thread) - kthread_stop(sched->thread); + drm_sched_wqueue_stop(sched); - for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { - struct drm_sched_rq *rq = &sched->sched_rq[i]; + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { + struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); - list_for_each_entry(s_entity, &rq->entities, list) + list_for_each_entry(s_entity, &rq->entities, list) { /* * Prevents reinsertion and marks job_queue as idle, - * it will removed from rq in drm_sched_entity_fini + * it will be removed from the rq in drm_sched_entity_fini() * eventually + * + * FIXME: + * This lacks the proper spin_lock(&s_entity->lock) and + * is, therefore, a race condition. Most notably, it + * can race with drm_sched_entity_push_job(). The lock + * cannot be taken here, however, because this would + * lead to lock inversion -> deadlock. + * + * The best solution probably is to enforce the life + * time rule of all entities having to be torn down + * before their scheduler. Then, however, locking could + * be dropped alltogether from this function. + * + * For now, this remains a potential race in all + * drivers that keep entities alive for longer than + * the scheduler. + * + * The READ_ONCE() is there to make the lockless read + * (warning about the lockless write below) slightly + * less broken... */ + if (!READ_ONCE(s_entity->stopped)) + dev_warn(sched->dev, "Tearing down scheduler with active entities!\n"); s_entity->stopped = true; + } spin_unlock(&rq->lock); - + kfree(sched->sched_rq[i]); } /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */ @@ -1159,7 +1465,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) /* Confirm no work left behind accessing device structures */ cancel_delayed_work_sync(&sched->work_tdr); + /* Avoid memory leaks if supported by the driver. 
*/ + if (sched->ops->cancel_job) + drm_sched_cancel_remaining_jobs(sched); + + if (sched->own_submit_wq) + destroy_workqueue(sched->submit_wq); sched->ready = false; + kfree(sched->sched_rq); + sched->sched_rq = NULL; + + if (!list_empty(&sched->pending_list)) + dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n"); } EXPORT_SYMBOL(drm_sched_fini); @@ -1186,9 +1503,8 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { atomic_inc(&bad->karma); - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; - i++) { - struct drm_sched_rq *rq = &sched->sched_rq[i]; + for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) { + struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry_safe(entity, tmp, &rq->entities, list) { @@ -1206,3 +1522,48 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) } } EXPORT_SYMBOL(drm_sched_increase_karma); + +/** + * drm_sched_wqueue_ready - Is the scheduler ready for submission + * + * @sched: scheduler instance + * + * Returns true if submission is ready + */ +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched) +{ + return sched->ready; +} +EXPORT_SYMBOL(drm_sched_wqueue_ready); + +/** + * drm_sched_wqueue_stop - stop scheduler submission + * @sched: scheduler instance + * + * Stops the scheduler from pulling new jobs from entities. It also stops + * freeing jobs automatically through drm_sched_backend_ops.free_job(). + */ +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) +{ + WRITE_ONCE(sched->pause_submit, true); + cancel_work_sync(&sched->work_run_job); + cancel_work_sync(&sched->work_free_job); +} +EXPORT_SYMBOL(drm_sched_wqueue_stop); + +/** + * drm_sched_wqueue_start - start scheduler submission + * @sched: scheduler instance + * + * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it. + * + * This function is not necessary for 'conventional' startup. The scheduler is + * fully operational after drm_sched_init() succeeded. + */ +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) +{ + WRITE_ONCE(sched->pause_submit, false); + queue_work(sched->submit_wq, &sched->work_run_job); + queue_work(sched->submit_wq, &sched->work_free_job); +} +EXPORT_SYMBOL(drm_sched_wqueue_start); diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile new file mode 100644 index 000000000000..5bf707bad373 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +drm-sched-tests-y := \ + mock_scheduler.o \ + tests_basic.o + +obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c new file mode 100644 index 000000000000..8e9ae7d980eb --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Valve Corporation */ + +#include "sched_tests.h" + +/* + * Here we implement the mock "GPU" (or the scheduler backend) which is used by + * the DRM scheduler unit tests in order to exercise the core functionality. + * + * Test cases are implemented in a separate file. 
+ */ + +/** + * drm_mock_sched_entity_new - Create a new mock scheduler entity + * + * @test: KUnit test owning the entity + * @priority: Scheduling priority + * @sched: Mock scheduler on which the entity can be scheduled + * + * Returns: New mock scheduler entity with allocation managed by the test + */ +struct drm_mock_sched_entity * +drm_mock_sched_entity_new(struct kunit *test, + enum drm_sched_priority priority, + struct drm_mock_scheduler *sched) +{ + struct drm_mock_sched_entity *entity; + struct drm_gpu_scheduler *drm_sched; + int ret; + + entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, entity); + + drm_sched = &sched->base; + ret = drm_sched_entity_init(&entity->base, + priority, + &drm_sched, 1, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + entity->test = test; + + return entity; +} + +/** + * drm_mock_sched_entity_free - Destroys a mock scheduler entity + * + * @entity: Entity to destroy + * + * To be used from the test cases once done with the entity. + */ +void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity) +{ + drm_sched_entity_destroy(&entity->base); +} + +static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + + lockdep_assert_held(&sched->lock); + + job->flags |= DRM_MOCK_SCHED_JOB_DONE; + list_del(&job->link); + dma_fence_signal_locked(&job->hw_fence); + complete(&job->done); +} + +static enum hrtimer_restart +drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer) +{ + struct drm_mock_sched_job *job = + container_of(hrtimer, typeof(*job), timer); + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + struct drm_mock_sched_job *next; + ktime_t now = ktime_get(); + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &sched->job_list, link) { + if (!job->duration_us) + break; + + if (ktime_before(now, job->finish_at)) + break; + + sched->hw_timeline.cur_seqno = job->hw_fence.seqno; + drm_mock_sched_job_complete(job); + } + spin_unlock_irqrestore(&sched->lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * drm_mock_sched_job_new - Create a new mock scheduler job + * + * @test: KUnit test owning the job + * @entity: Scheduler entity of the job + * + * Returns: New mock scheduler job with allocation managed by the test + */ +struct drm_mock_sched_job * +drm_mock_sched_job_new(struct kunit *test, + struct drm_mock_sched_entity *entity) +{ + struct drm_mock_sched_job *job; + int ret; + + job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, job); + + ret = drm_sched_job_init(&job->base, + &entity->base, + 1, + NULL, + 1); + KUNIT_ASSERT_EQ(test, ret, 0); + + job->test = test; + + init_completion(&job->done); + INIT_LIST_HEAD(&job->link); + hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + + return job; +} + +static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence) +{ + return "drm_mock_sched"; +} + +static const char * +drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, typeof(*job), hw_fence); + + return (const char *)job->base.sched->name; +} + +static void drm_mock_sched_hw_fence_release(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, typeof(*job), hw_fence); + + hrtimer_cancel(&job->timer); + + /* 
Containing job is freed by the kunit framework */ +} + +static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = { + .get_driver_name = drm_mock_sched_hw_fence_driver_name, + .get_timeline_name = drm_mock_sched_hw_fence_timeline_name, + .release = drm_mock_sched_hw_fence_release, +}; + +static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + + dma_fence_init(&job->hw_fence, + &drm_mock_sched_hw_fence_ops, + &sched->lock, + sched->hw_timeline.context, + atomic_inc_return(&sched->hw_timeline.next_seqno)); + + dma_fence_get(&job->hw_fence); /* Reference for the job_list */ + + spin_lock_irq(&sched->lock); + if (job->duration_us) { + ktime_t prev_finish_at = 0; + + if (!list_empty(&sched->job_list)) { + struct drm_mock_sched_job *prev = + list_last_entry(&sched->job_list, typeof(*prev), + link); + + prev_finish_at = prev->finish_at; + } + + if (!prev_finish_at) + prev_finish_at = ktime_get(); + + job->finish_at = ktime_add_us(prev_finish_at, job->duration_us); + } + list_add_tail(&job->link, &sched->job_list); + if (job->finish_at) + hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS); + spin_unlock_irq(&sched->lock); + + return &job->hw_fence; +} + +/* + * Normally, drivers would take appropriate measures in this callback, such as + * killing the entity the faulty job is associated with, resetting the hardware + * and / or resubmitting non-faulty jobs. + * + * For the mock scheduler, there are no hardware rings to be resetted nor jobs + * to be resubmitted. Thus, this function merely ensures that + * a) timedout fences get signaled properly and removed from the pending list + * b) the mock scheduler framework gets informed about the timeout via a flag + * c) The drm_sched_job, not longer needed, gets freed + */ +static enum drm_gpu_sched_stat +mock_sched_timedout_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + unsigned long flags; + + if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) { + job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED; + return DRM_GPU_SCHED_STAT_NO_HANG; + } + + spin_lock_irqsave(&sched->lock, flags); + if (!dma_fence_is_signaled_locked(&job->hw_fence)) { + list_del(&job->link); + job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT; + dma_fence_set_error(&job->hw_fence, -ETIMEDOUT); + dma_fence_signal_locked(&job->hw_fence); + } + spin_unlock_irqrestore(&sched->lock, flags); + + dma_fence_put(&job->hw_fence); + drm_sched_job_cleanup(sched_job); + /* Mock job itself is freed by the kunit framework. */ + + return DRM_GPU_SCHED_STAT_RESET; +} + +static void mock_sched_free_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + + dma_fence_put(&job->hw_fence); + drm_sched_job_cleanup(sched_job); + + /* Mock job itself is freed by the kunit framework. 
*/ +} + +static void mock_sched_cancel_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + unsigned long flags; + + hrtimer_cancel(&job->timer); + + spin_lock_irqsave(&sched->lock, flags); + if (!dma_fence_is_signaled_locked(&job->hw_fence)) { + list_del(&job->link); + dma_fence_set_error(&job->hw_fence, -ECANCELED); + dma_fence_signal_locked(&job->hw_fence); + } + spin_unlock_irqrestore(&sched->lock, flags); + + /* + * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still. + * Mock job itself is freed by the kunit framework. + */ +} + +static const struct drm_sched_backend_ops drm_mock_scheduler_ops = { + .run_job = mock_sched_run_job, + .timedout_job = mock_sched_timedout_job, + .free_job = mock_sched_free_job, + .cancel_job = mock_sched_cancel_job, +}; + +/** + * drm_mock_sched_new - Create a new mock scheduler + * + * @test: KUnit test owning the job + * @timeout: Job timeout to set + * + * Returns: New mock scheduler with allocation managed by the test + */ +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout) +{ + struct drm_sched_init_args args = { + .ops = &drm_mock_scheduler_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = U32_MAX, + .hang_limit = 1, + .timeout = timeout, + .name = "drm-mock-scheduler", + }; + struct drm_mock_scheduler *sched; + int ret; + + sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, sched); + + ret = drm_sched_init(&sched->base, &args); + KUNIT_ASSERT_EQ(test, ret, 0); + + sched->test = test; + sched->hw_timeline.context = dma_fence_context_alloc(1); + atomic_set(&sched->hw_timeline.next_seqno, 0); + INIT_LIST_HEAD(&sched->job_list); + spin_lock_init(&sched->lock); + + return sched; +} + +/** + * drm_mock_sched_fini - Destroys a mock scheduler + * + * @sched: Scheduler to destroy + * + * To be used from the test cases once done with the scheduler. + */ +void drm_mock_sched_fini(struct drm_mock_scheduler *sched) +{ + drm_sched_fini(&sched->base); +} + +/** + * drm_mock_sched_advance - Advances the mock scheduler timeline + * + * @sched: Scheduler timeline to advance + * @num: By how many jobs to advance + * + * Advancing the scheduler timeline by a number of seqnos will trigger + * signalling of the hardware fences and unlinking the jobs from the internal + * scheduler tracking. + * + * This can be used from test cases which want complete control of the simulated + * job execution timing. For example submitting one job with no set duration + * would never complete it before test cases advances the timeline by one. 
+ */ +unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched, + unsigned int num) +{ + struct drm_mock_sched_job *job, *next; + unsigned int found = 0; + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&sched->lock, flags); + if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num < + sched->hw_timeline.cur_seqno)) + goto unlock; + sched->hw_timeline.cur_seqno += num; + list_for_each_entry_safe(job, next, &sched->job_list, link) { + if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno) + break; + + drm_mock_sched_job_complete(job); + found++; + } +unlock: + spin_unlock_irqrestore(&sched->lock, flags); + + return found; +} + +MODULE_DESCRIPTION("DRM mock scheduler and tests"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h new file mode 100644 index 000000000000..553d45abd057 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Valve Corporation */ + +#ifndef _SCHED_TESTS_H_ +#define _SCHED_TESTS_H_ + +#include <kunit/test.h> +#include <linux/atomic.h> +#include <linux/completion.h> +#include <linux/dma-fence.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#include <drm/gpu_scheduler.h> + +/* + * DOC: Mock DRM scheduler data structures + * + * drm_mock_* data structures are used to implement a mock "GPU". + * + * They subclass the core DRM scheduler objects and add their data on top, which + * enables tracking the submitted jobs and simulating their execution with the + * attributes as specified by the test case. + */ + +/** + * struct drm_mock_scheduler - implements a trivial mock GPU execution engine + * + * @base: DRM scheduler base class + * @test: Backpointer to owning the kunit test case + * @lock: Lock to protect the simulated @hw_timeline and @job_list + * @job_list: List of jobs submitted to the mock GPU + * @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and + * @cur_seqno for implementing a struct dma_fence signaling the + * simulated job completion. + * + * Trivial mock GPU execution engine tracks submitted jobs and enables + * completing them strictly in submission order. + */ +struct drm_mock_scheduler { + struct drm_gpu_scheduler base; + + struct kunit *test; + + spinlock_t lock; + struct list_head job_list; + + struct { + u64 context; + atomic_t next_seqno; + unsigned int cur_seqno; + } hw_timeline; +}; + +/** + * struct drm_mock_sched_entity - implements a mock GPU sched entity + * + * @base: DRM scheduler entity base class + * @test: Backpointer to owning the kunit test case + * + * Mock GPU sched entity is used by the test cases to submit jobs to the mock + * scheduler. + */ +struct drm_mock_sched_entity { + struct drm_sched_entity base; + + struct kunit *test; +}; + +/** + * struct drm_mock_sched_job - implements a mock GPU job + * + * @base: DRM sched job base class + * @done: Completion signaling job completion. + * @flags: Flags designating job state. 
+ * @link: List head element used by job tracking by the drm_mock_scheduler + * @timer: Timer used for simulating job execution duration + * @duration_us: Simulated job duration in micro seconds, or zero if in manual + * timeline advance mode + * @finish_at: Absolute time when the jobs with set duration will complete + * @lock: Lock used for @hw_fence + * @hw_fence: Fence returned to DRM scheduler as the hardware fence + * @test: Backpointer to owning the kunit test case + * + * Mock GPU sched job is used by the test cases to submit jobs to the mock + * scheduler. + */ +struct drm_mock_sched_job { + struct drm_sched_job base; + + struct completion done; + +#define DRM_MOCK_SCHED_JOB_DONE 0x1 +#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 +#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4 +#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8 + unsigned long flags; + + struct list_head link; + struct hrtimer timer; + + unsigned int duration_us; + ktime_t finish_at; + + struct dma_fence hw_fence; + + struct kunit *test; +}; + +static inline struct drm_mock_scheduler * +drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched) +{ + return container_of(sched, struct drm_mock_scheduler, base); +}; + +static inline struct drm_mock_sched_entity * +drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity) +{ + return container_of(sched_entity, struct drm_mock_sched_entity, base); +}; + +static inline struct drm_mock_sched_job * +drm_sched_job_to_mock_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct drm_mock_sched_job, base); +}; + +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, + long timeout); +void drm_mock_sched_fini(struct drm_mock_scheduler *sched); +unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched, + unsigned int num); + +struct drm_mock_sched_entity * +drm_mock_sched_entity_new(struct kunit *test, + enum drm_sched_priority priority, + struct drm_mock_scheduler *sched); +void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity); + +struct drm_mock_sched_job * +drm_mock_sched_job_new(struct kunit *test, + struct drm_mock_sched_entity *entity); + +/** + * drm_mock_sched_job_submit - Arm and submit a job in one go + * + * @job: Job to arm and submit + */ +static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job) +{ + drm_sched_job_arm(&job->base); + drm_sched_entity_push_job(&job->base); +} + +/** + * drm_mock_sched_job_set_duration_us - Set a job duration + * + * @job: Job to set the duration for + * @duration_us: Duration in micro seconds + * + * Jobs with duration set will be automatically completed by the mock scheduler + * as the timeline progresses, unless a job without a set duration is + * encountered in the timelime in which case calling drm_mock_sched_advance() + * will be required to bump the timeline. 
+ */ +static inline void +drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job, + unsigned int duration_us) +{ + job->duration_us = duration_us; +} + +/** + * drm_mock_sched_job_is_finished - Check if a job is finished + * + * @job: Job to check + * + * Returns: true if finished + */ +static inline bool +drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job) +{ + return job->flags & DRM_MOCK_SCHED_JOB_DONE; +} + +/** + * drm_mock_sched_job_wait_finished - Wait until a job is finished + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if finished within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout) +{ + if (job->flags & DRM_MOCK_SCHED_JOB_DONE) + return true; + + return wait_for_completion_timeout(&job->done, timeout) != 0; +} + +/** + * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if scheduled within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout) +{ + KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0); + + return dma_fence_wait_timeout(&job->base.s_fence->scheduled, + false, + timeout) != 0; +} + +#endif diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c new file mode 100644 index 000000000000..82a41a456b0a --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -0,0 +1,563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Valve Corporation */ + +#include <linux/delay.h> + +#include "sched_tests.h" + +#define MOCK_TIMEOUT (HZ / 5) + +/* + * DRM scheduler basic tests should check the basic functional correctness of + * the scheduler, including some very light smoke testing. More targeted tests, + * for example focusing on testing specific bugs and other more complicated test + * scenarios, should be implemented in separate source units. + */ + +static int drm_sched_basic_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + + return 0; +} + +static void drm_sched_basic_exit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + + drm_mock_sched_fini(sched); +} + +static int drm_sched_timeout_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT); + + return 0; +} + +static void drm_sched_basic_submit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + unsigned int i; + bool done; + + /* + * Submit one job to the scheduler and verify that it gets scheduled + * and completed only when the mock hw backend processes it. 
+ */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, HZ / 2); + KUNIT_ASSERT_FALSE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); +} + +struct drm_sched_basic_params { + const char *description; + unsigned int queue_depth; + unsigned int num_entities; + unsigned int job_us; + bool dep_chain; +}; + +static const struct drm_sched_basic_params drm_sched_basic_cases[] = { + { + .description = "A queue of jobs in a single entity", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 1, + }, + { + .description = "A chain of dependent jobs across multiple entities", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 1, + .dep_chain = true, + }, + { + .description = "Multiple independent job queues", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 4, + }, + { + .description = "Multiple inter-dependent job queues", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 4, + .dep_chain = true, + }, +}; + +static void +drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc) +{ + strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc); + +static void drm_sched_basic_test(struct kunit *test) +{ + const struct drm_sched_basic_params *params = test->param_value; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job, *prev = NULL; + struct drm_mock_sched_entity **entity; + unsigned int i, cur_ent = 0; + bool done; + + entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity), + GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, entity); + + for (i = 0; i < params->num_entities; i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + for (i = 0; i < params->queue_depth; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= params->num_entities; + drm_mock_sched_job_set_duration_us(job, params->job_us); + if (params->dep_chain && prev) + drm_sched_job_add_dependency(&job->base, + dma_fence_get(&prev->base.s_fence->finished)); + drm_mock_sched_job_submit(job); + prev = job; + } + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + for (i = 0; i < params->num_entities; i++) + drm_mock_sched_entity_free(entity[i]); +} + +static void drm_sched_basic_entity_cleanup(struct kunit *test) +{ + struct drm_mock_sched_job *job, *mid, *prev = NULL; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity[4]; + const unsigned int qd = 100; + unsigned int i, cur_ent = 0; + bool done; + + /* + * Submit a queue of jobs across different entities with an explicit + * chain of dependencies between them and trigger entity cleanup while + * the queue is still being processed. 
+ */ + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + if (prev) + drm_sched_job_add_dependency(&job->base, + dma_fence_get(&prev->base.s_fence->finished)); + drm_mock_sched_job_submit(job); + if (i == qd / 2) + mid = job; + prev = job; + } + + done = drm_mock_sched_job_wait_finished(mid, HZ); + KUNIT_ASSERT_TRUE(test, done); + + /* Exit with half of the queue still pending to be executed. */ + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_basic_tests[] = { + KUNIT_CASE(drm_sched_basic_submit), + KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params), + KUNIT_CASE(drm_sched_basic_entity_cleanup), + {} +}; + +static struct kunit_suite drm_sched_basic = { + .name = "drm_sched_basic_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_basic_tests, +}; + +static void drm_sched_basic_cancel(struct kunit *test) +{ + struct drm_mock_sched_entity *entity; + struct drm_mock_scheduler *sched; + struct drm_mock_sched_job *job; + bool done; + + /* + * Check that drm_sched_fini() uses the cancel_job() callback to cancel + * jobs that are still pending. + */ + + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL, + sched); + + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); + drm_mock_sched_fini(sched); + + KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED); +} + +static struct kunit_case drm_sched_cancel_tests[] = { + KUNIT_CASE(drm_sched_basic_cancel), + {} +}; + +static struct kunit_suite drm_sched_cancel = { + .name = "drm_sched_basic_cancel_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_cancel_tests, +}; + +static void drm_sched_basic_timeout(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + bool done; + + /* + * Submit a single job against a scheduler with the timeout configured + * and verify that the timeout handling will run if the backend fails + * to complete it in time. 
+ */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + 0); + + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + DRM_MOCK_SCHED_JOB_TIMEDOUT); + + drm_mock_sched_entity_free(entity); +} + +static void drm_sched_skip_reset(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + unsigned int i; + bool done; + + /* + * Submit a single job against a scheduler with the timeout configured + * and verify that if the job is still running, the timeout handler + * will skip the reset and allow the job to complete. + */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET; + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED, + DRM_MOCK_SCHED_JOB_RESET_SKIPPED); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); +} + +static struct kunit_case drm_sched_timeout_tests[] = { + KUNIT_CASE(drm_sched_basic_timeout), + KUNIT_CASE(drm_sched_skip_reset), + {} +}; + +static struct kunit_suite drm_sched_timeout = { + .name = "drm_sched_basic_timeout_tests", + .init = drm_sched_timeout_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_timeout_tests, +}; + +static void drm_sched_priorities(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 100; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + bool done; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities. 
+ */ + + BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW); + BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT); + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static void drm_sched_change_priority(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities and while waiting for them to complete, periodically keep + * changing their priorities. + * + * We set up the queue-depth (qd) and job duration so the priority + * changing loop has some time to interact with submissions to the + * backend and job completions as they progress. + */ + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + drm_sched_entity_set_priority(&entity[cur_ent]->base, + (entity[cur_ent]->base.priority + 1) % + DRM_SCHED_PRIORITY_COUNT); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + usleep_range(200, 500); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_priority_tests[] = { + KUNIT_CASE(drm_sched_priorities), + KUNIT_CASE(drm_sched_change_priority), + {} +}; + +static struct kunit_suite drm_sched_priority = { + .name = "drm_sched_basic_priority_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_priority_tests, +}; + +static void drm_sched_test_modify_sched(struct kunit *test) +{ + unsigned int i, cur_ent = 0, cur_sched = 0; + struct drm_mock_sched_entity *entity[13]; + struct drm_mock_scheduler *sched[3]; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + + /* + * Submit a bunch of jobs against entities configured with different + * schedulers and while waiting for them to complete, periodically keep + * changing schedulers associated with each entity. + * + * We set up the queue-depth (qd) and job duration so the sched modify + * loop has some time to interact with submissions to the backend and + * job completions as they progress. + * + * For the number of schedulers and entities we use primes in order to + * perturb the entity->sched assignments with less of a regular pattern. 
+ */ + + for (i = 0; i < ARRAY_SIZE(sched); i++) + sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched[i % ARRAY_SIZE(sched)]); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + struct drm_gpu_scheduler *modify; + + usleep_range(200, 500); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + cur_sched++; + cur_sched %= ARRAY_SIZE(sched); + modify = &sched[cur_sched]->base; + drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify, + 1); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); + + for (i = 0; i < ARRAY_SIZE(sched); i++) + drm_mock_sched_fini(sched[i]); +} + +static struct kunit_case drm_sched_modify_sched_tests[] = { + KUNIT_CASE(drm_sched_test_modify_sched), + {} +}; + +static struct kunit_suite drm_sched_modify_sched = { + .name = "drm_sched_basic_modify_sched_tests", + .test_cases = drm_sched_modify_sched_tests, +}; + +static void drm_sched_test_credits(struct kunit *test) +{ + struct drm_mock_sched_entity *entity; + struct drm_mock_scheduler *sched; + struct drm_mock_sched_job *job[2]; + bool done; + int i; + + /* + * Check that the configured credit limit is respected. + */ + + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + sched->base.credit_limit = 1; + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + job[0] = drm_mock_sched_job_new(test, entity); + job[1] = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job[0]); + drm_mock_sched_job_submit(job[1]); + + done = drm_mock_sched_job_wait_scheduled(job[0], HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_FALSE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); + drm_mock_sched_fini(sched); +} + +static struct kunit_case drm_sched_credits_tests[] = { + KUNIT_CASE(drm_sched_test_credits), + {} +}; + +static struct kunit_suite drm_sched_credits = { + .name = "drm_sched_basic_credits_tests", + .test_cases = drm_sched_credits_tests, +}; + +kunit_test_suites(&drm_sched_basic, + &drm_sched_timeout, + &drm_sched_cancel, + &drm_sched_priority, + &drm_sched_modify_sched, + &drm_sched_credits); |