Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_entity.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 111
1 file changed, 65 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3c4f5a392b06..e671aa241720 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -28,10 +28,9 @@
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
-#include "gpu_scheduler_trace.h"
+#include "sched_internal.h"
-#define to_drm_sched_job(sched_job) \
- container_of((sched_job), struct drm_sched_job, queue_node)
+#include "gpu_scheduler_trace.h"
/**
* drm_sched_entity_init - Init a context entity used by scheduler when
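The open-coded to_drm_sched_job() macro moves into the shared
sched_internal.h header, which this diff does not include. Judging by the
call sites further down, the replacement helpers plausibly look like the
following sketch; the exact header contents, including the NULL handling,
are an assumption:

	/* Sketch of the assumed sched_internal.h helpers, not the real header. */
	static inline struct drm_sched_job *
	drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
	{
		struct spsc_node *node = spsc_queue_peek(&entity->job_queue);

		return node ? container_of(node, struct drm_sched_job, queue_node) : NULL;
	}

	static inline struct drm_sched_job *
	drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
	{
		struct spsc_node *node = spsc_queue_pop(&entity->job_queue);

		return node ? container_of(node, struct drm_sched_job, queue_node) : NULL;
	}

The drm_sched_entity_is_ready() removal further down fits the same move:
it is presumably now declared in sched_internal.h as well.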
@@ -51,7 +50,7 @@
* drm_sched_entity_set_priority(). For changing the set of schedulers
* @sched_list at runtime see drm_sched_entity_modify_sched().
*
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
* drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
@@ -71,13 +70,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ /*
+ * It's perfectly valid to initialize an entity without having a valid
+ * scheduler attached. It's just not valid to use the scheduler before it
+ * is initialized itself.
+ */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
- if (!sched_list[0]->sched_rq) {
- /* Warn drivers not to do this and to fix their DRM
- * calling order.
+ if (num_sched_list && !sched_list[0]->sched_rq) {
+ /* Every entry covered by num_sched_list should be
+ * non-NULL, so warn drivers not to do this and to
+ * fix their DRM calling order.
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
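Guarding the check with num_sched_list matters because, per the new
comment above, an entity may now legitimately be initialized with no
scheduler attached: with num_sched_list == 0, dereferencing sched_list[0]
would be invalid.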
@@ -86,7 +91,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
* the lowest priority available.
*/
if (entity->priority >= sched_list[0]->num_rqs) {
- drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+ dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
(s32) DRM_SCHED_PRIORITY_KERNEL);
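A worked example of the clamp: with num_rqs == 3 and a requested priority
of 5, the error is logged and priority becomes
max_t(s32, 3 - 1, DRM_SCHED_PRIORITY_KERNEL) == 2, the last valid
run-queue index. Assuming DRM_SCHED_PRIORITY_KERNEL is 0, the lower bound
also keeps the result non-negative in the degenerate num_rqs == 0 case.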
@@ -99,7 +104,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
/* We start in an idle state. */
complete_all(&entity->entity_idle);
- spin_lock_init(&entity->rq_lock);
+ spin_lock_init(&entity->lock);
spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
@@ -127,8 +132,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
{
WARN_ON(!num_sched_list || !sched_list);
+ spin_lock(&entity->lock);
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
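Taking entity->lock here pairs with drm_sched_entity_select_rq() further
down, which now reads sched_list and num_sched_list under the same lock;
without it, a concurrent modification could be observed half-applied (the
new list pointer with the old length, or vice versa).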
@@ -144,18 +151,6 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/* Return true if entity could provide a job. */
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
-{
- if (spsc_queue_peek(&entity->job_queue) == NULL)
- return false;
-
- if (READ_ONCE(entity->dependency))
- return false;
-
- return true;
-}
-
/**
* drm_sched_entity_error - return error of last scheduled job
* @entity: scheduler entity to check
@@ -181,6 +176,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+ drm_sched_fence_scheduled(job->s_fence, NULL);
drm_sched_fence_finished(job->s_fence, -ESRCH);
WARN_ON(job->s_fence->parent);
job->sched->ops->free_job(job);
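Signaling the scheduled fence before the finished one preserves the
expected fence ordering for jobs killed before they ever ran: anything
waiting on s_fence->scheduled would otherwise never wake. The NULL
argument reflects that a job which never reached the hardware has no
parent hardware fence.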
@@ -236,10 +232,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
if (!entity->rq)
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->stopped = true;
drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
/* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle);
@@ -247,13 +243,20 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
/* The entity is guaranteed to not be used by the scheduler */
prev = rcu_dereference_check(entity->last_scheduled, true);
dma_fence_get(prev);
- while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ while ((job = drm_sched_entity_queue_pop(entity))) {
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding callback above failed.
+ * dma_fence_put() checks for NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
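The added dma_fence_put(prev) plugs a reference leak on the failure path.
A reference is held on prev for the callback (taken via dma_fence_get()
before the loop, then inherited from each job's finished fence). When
dma_fence_add_callback() succeeds, drm_sched_entity_kill_jobs_cb()
presumably drops that reference once it runs on the fence; when prev is
NULL or has already signaled, the callback is instead invoked by hand
with a NULL fence, so the caller must drop the reference itself, and
dma_fence_put(NULL) being a no-op keeps the !prev case safe.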
@@ -364,8 +367,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
}
/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
- * wake up scheduler
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
*/
static void drm_sched_entity_wakeup(struct dma_fence *f,
struct dma_fence_cb *cb)
@@ -374,7 +377,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
container_of(cb, struct drm_sched_entity, cb);
drm_sched_entity_clear_dep(f, cb);
- drm_sched_wakeup(entity->rq->sched, entity);
+ drm_sched_wakeup(entity->rq->sched);
}
/**
@@ -383,14 +386,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
* @entity: scheduler entity
* @priority: scheduler priority
*
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
*/
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->priority = priority;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -469,7 +472,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_sched_job *sched_job;
- sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ sched_job = drm_sched_entity_queue_peek(entity);
if (!sched_job)
return NULL;
@@ -505,9 +508,18 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
struct drm_sched_job *next;
- next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
- if (next)
- drm_sched_rq_update_fifo(entity, next->submit_ts);
+ next = drm_sched_entity_queue_peek(entity);
+ if (next) {
+ struct drm_sched_rq *rq;
+
+ spin_lock(&entity->lock);
+ rq = entity->rq;
+ spin_lock(&rq->lock);
+ drm_sched_rq_update_fifo_locked(entity, rq,
+ next->submit_ts);
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
+ }
}
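This hunk and the drm_sched_entity_push_job() hunk below establish the
same nesting discipline for the two locks that replace the old rq_lock;
the ordering shown here is inferred from these call sites rather than
from any documented rule:

	/* Inferred lock nesting, outer to inner: */
	spin_lock(&entity->lock);	/* stabilizes entity->rq and priority */
	spin_lock(&rq->lock);		/* protects run-queue membership and the FIFO tree */
	/* ... update the FIFO position ... */
	spin_unlock(&rq->lock);
	spin_unlock(&entity->lock);

Note that entity->rq is re-read into the local rq while entity->lock is
held, since drm_sched_entity_select_rq() can retarget the entity to a
different run queue.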
/* Jobs and entities might have different lifecycles. Since we're
@@ -547,14 +559,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
if (fence && !dma_fence_is_signaled(fence))
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
rq = sched ? sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
@@ -568,8 +580,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
* fence sequence number this function should be called with drm_sched_job_arm()
* under common lock for the struct drm_sched_entity that was set up for
* @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
*/
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
@@ -591,22 +601,31 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
/* first job wakes up scheduler */
if (first) {
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_rq *rq;
+
/* Add the entity to the run queue */
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
if (entity->stopped) {
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
return;
}
- drm_sched_rq_add_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
+ rq = entity->rq;
+ sched = rq->sched;
+
+ spin_lock(&rq->lock);
+ drm_sched_rq_add_entity(rq, entity);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_update_fifo(entity, submit_ts);
+ drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
+
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
- drm_sched_wakeup(entity->rq->sched, entity);
+ drm_sched_wakeup(sched);
}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
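In the final hunk, rq and sched are cached in locals while entity->lock
is held: once the entity is on the run queue and the locks are dropped,
entity->rq may change under drm_sched_entity_select_rq(), so the closing
drm_sched_wakeup(sched) deliberately uses the cached scheduler instead of
re-reading entity->rq->sched.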