Diffstat (limited to 'drivers/gpu/drm/panthor/panthor_sched.c')
 drivers/gpu/drm/panthor/panthor_sched.c | 364
 1 file changed, 261 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 3d1f57e3990f..b834123a6560 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -5,6 +5,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
@@ -360,17 +361,23 @@ struct panthor_queue {
/** @entity: DRM scheduling entity used for this queue. */
struct drm_sched_entity entity;
- /**
- * @remaining_time: Time remaining before the job timeout expires.
- *
- * The job timeout is suspended when the queue is not scheduled by the
- * FW. Every time we suspend the timer, we need to save the remaining
- * time so we can restore it later on.
- */
- unsigned long remaining_time;
+ /** @name: DRM scheduler name for this queue. */
+ char *name;
- /** @timeout_suspended: True if the job timeout was suspended. */
- bool timeout_suspended;
+ /** @timeout: Queue timeout related fields. */
+ struct {
+ /** @timeout.work: Work executed when a queue timeout occurs. */
+ struct delayed_work work;
+
+ /**
+ * @timeout.remaining: Time remaining before a queue timeout.
+ *
+ * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT.
+ * When the timer is suspended, it's set to the time remaining when the
+ * timer was suspended.
+ */
+ unsigned long remaining;
+ } timeout;
/**
* @doorbell_id: Doorbell assigned to this queue.
@@ -895,11 +902,18 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (IS_ERR_OR_NULL(queue))
return;
- drm_sched_entity_destroy(&queue->entity);
+ /* The timeout work should have been disabled by this point. */
+ drm_WARN_ON(&group->ptdev->base,
+ disable_delayed_work_sync(&queue->timeout.work));
+
+ if (queue->entity.fence_context)
+ drm_sched_entity_destroy(&queue->entity);
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
+ kfree(queue->name);
+
panthor_queue_put_syncwait_obj(queue);
panthor_kernel_bo_destroy(queue->ringbuf);
@@ -1039,6 +1053,115 @@ group_unbind_locked(struct panthor_group *group)
return 0;
}
+static bool
+group_is_idle(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ u32 inactive_queues;
+
+ if (group->csg_id >= 0)
+ return ptdev->scheduler->csg_slots[group->csg_id].idle;
+
+ inactive_queues = group->idle_queues | group->blocked_queues;
+ return hweight32(inactive_queues) == group->queue_count;
+}
+
+static void
+queue_reset_timeout_locked(struct panthor_queue *queue)
+{
+ lockdep_assert_held(&queue->fence_ctx.lock);
+
+ if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) {
+ mod_delayed_work(queue->scheduler.timeout_wq,
+ &queue->timeout.work,
+ msecs_to_jiffies(JOB_TIMEOUT_MS));
+ }
+}
+
+static bool
+group_can_run(struct panthor_group *group)
+{
+ return group->state != PANTHOR_CS_GROUP_TERMINATED &&
+ group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
+ !group->destroyed && group->fatal_queues == 0 &&
+ !group->timedout;
+}
+
+static bool
+queue_timeout_is_suspended(struct panthor_queue *queue)
+{
+ /* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */
+ return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT;
+}
+
+static void
+queue_suspend_timeout_locked(struct panthor_queue *queue)
+{
+ unsigned long qtimeout, now;
+ struct panthor_group *group;
+ struct panthor_job *job;
+ bool timer_was_active;
+
+ lockdep_assert_held(&queue->fence_ctx.lock);
+
+ /* Already suspended, nothing to do. */
+ if (queue_timeout_is_suspended(queue))
+ return;
+
+ job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
+ struct panthor_job, node);
+ group = job ? job->group : NULL;
+
+ /* If the queue is blocked and the group is idle, we want the timer to
+ * keep running: the group can't be unblocked by its other queues, so
+ * the unblock has to come from an external source, and we want to
+ * timebox that external signalling.
+ */
+ if (group && group_can_run(group) &&
+ (group->blocked_queues & BIT(job->queue_idx)) &&
+ group_is_idle(group))
+ return;
+
+ now = jiffies;
+ qtimeout = queue->timeout.work.timer.expires;
+
+ /* Cancel the timer. */
+ timer_was_active = cancel_delayed_work(&queue->timeout.work);
+ if (!timer_was_active || !job)
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+ else if (time_after(qtimeout, now))
+ queue->timeout.remaining = qtimeout - now;
+ else
+ queue->timeout.remaining = 0;
+
+ if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS)))
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+}
+
+static void
+queue_suspend_timeout(struct panthor_queue *queue)
+{
+ spin_lock(&queue->fence_ctx.lock);
+ queue_suspend_timeout_locked(queue);
+ spin_unlock(&queue->fence_ctx.lock);
+}
+
+static void
+queue_resume_timeout(struct panthor_queue *queue)
+{
+ spin_lock(&queue->fence_ctx.lock);
+
+ if (queue_timeout_is_suspended(queue)) {
+ mod_delayed_work(queue->scheduler.timeout_wq,
+ &queue->timeout.work,
+ queue->timeout.remaining);
+
+ queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ spin_unlock(&queue->fence_ctx.lock);
+}
+
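
The helpers above replace the drm_sched_suspend_timeout()/drm_sched_resume_timeout() calls used before this patch. A minimal sketch of the same pattern on a bare delayed_work, with MAX_SCHEDULE_TIMEOUT as the "timer is armed" sentinel (the suspendable_timeout type and st_* helpers are illustrative, not part of the patch):

        #include <linux/jiffies.h>
        #include <linux/sched.h>        /* MAX_SCHEDULE_TIMEOUT */
        #include <linux/workqueue.h>

        struct suspendable_timeout {
                struct delayed_work work;
                /* MAX_SCHEDULE_TIMEOUT while armed, saved delay (jiffies) while suspended */
                unsigned long remaining;
        };

        static void st_suspend(struct suspendable_timeout *st)
        {
                unsigned long expires = st->work.timer.expires;

                if (st->remaining != MAX_SCHEDULE_TIMEOUT)
                        return; /* already suspended */

                /* cancel_delayed_work() reports whether the timer was still pending */
                if (cancel_delayed_work(&st->work) && time_after(expires, jiffies))
                        st->remaining = expires - jiffies;
                else
                        st->remaining = 0;
        }

        static void st_resume(struct suspendable_timeout *st, struct workqueue_struct *wq)
        {
                if (st->remaining == MAX_SCHEDULE_TIMEOUT)
                        return; /* already armed */

                mod_delayed_work(wq, &st->work, st->remaining);
                st->remaining = MAX_SCHEDULE_TIMEOUT;
        }

queue_suspend_timeout_locked() additionally keeps the timer running for blocked queues in otherwise idle groups, and clamps the saved value to JOB_TIMEOUT_MS.
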
/**
* cs_slot_prog_locked() - Program a queue slot
* @ptdev: Device.
@@ -1077,10 +1200,8 @@ cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_IDLE_EMPTY |
CS_STATE_MASK |
CS_EXTRACT_EVENT);
- if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
- drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
- queue->timeout_suspended = false;
- }
+ if (queue->iface.input->insert != queue->iface.input->extract)
+ queue_resume_timeout(queue);
}
/**
@@ -1107,14 +1228,7 @@ cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_STATE_STOP,
CS_STATE_MASK);
- /* If the queue is blocked, we want to keep the timeout running, so
- * we can detect unbounded waits and kill the group when that happens.
- */
- if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
- queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
- queue->timeout_suspended = true;
- WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
- }
+ queue_suspend_timeout(queue);
return 0;
}
@@ -1133,11 +1247,13 @@ csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
{
struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
struct panthor_fw_csg_iface *csg_iface;
+ u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
- csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
+ endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
+ csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req);
}
/**
@@ -1297,6 +1413,7 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
struct panthor_csg_slot *csg_slot;
struct panthor_group *group;
u32 queue_mask = 0, i;
+ u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
@@ -1323,10 +1440,12 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
csg_iface->input->allow_compute = group->compute_core_mask;
csg_iface->input->allow_fragment = group->fragment_core_mask;
csg_iface->input->allow_other = group->tiler_core_mask;
- csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
- CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
- CSG_EP_REQ_TILER(group->max_tiler_cores) |
- CSG_EP_REQ_PRIORITY(priority);
+ endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
+ CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
+ CSG_EP_REQ_TILER(group->max_tiler_cores) |
+ CSG_EP_REQ_PRIORITY(priority);
+ panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);
+
csg_iface->input->config = panthor_vm_as(group->vm);
if (group->suspend_buf)
@@ -1411,7 +1530,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
fault = cs_iface->output->fault;
info = cs_iface->output->fault_info;
- if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ if (queue) {
u64 cs_extract = queue->iface.output->extract;
struct panthor_job *job;
@@ -1909,28 +2028,6 @@ tick_ctx_is_full(const struct panthor_scheduler *sched,
return ctx->group_count == sched->csg_slot_count;
}
-static bool
-group_is_idle(struct panthor_group *group)
-{
- struct panthor_device *ptdev = group->ptdev;
- u32 inactive_queues;
-
- if (group->csg_id >= 0)
- return ptdev->scheduler->csg_slots[group->csg_id].idle;
-
- inactive_queues = group->idle_queues | group->blocked_queues;
- return hweight32(inactive_queues) == group->queue_count;
-}
-
-static bool
-group_can_run(struct panthor_group *group)
-{
- return group->state != PANTHOR_CS_GROUP_TERMINATED &&
- group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
- !group->destroyed && group->fatal_queues == 0 &&
- !group->timedout;
-}
-
static void
tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
struct panthor_sched_tick_ctx *ctx,
@@ -2224,9 +2321,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
continue;
}
- panthor_fw_update_reqs(csg_iface, endpoint_req,
- CSG_EP_REQ_PRIORITY(new_csg_prio),
- CSG_EP_REQ_PRIORITY_MASK);
+ panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
+ CSG_EP_REQ_PRIORITY(new_csg_prio),
+ CSG_EP_REQ_PRIORITY_MASK);
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
CSG_ENDPOINT_CONFIG);
@@ -2612,6 +2709,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
static void queue_stop(struct panthor_queue *queue,
struct panthor_job *bad_job)
{
+ disable_delayed_work_sync(&queue->timeout.work);
drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
}
@@ -2623,6 +2721,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ enable_delayed_work(&queue->timeout.work);
drm_sched_start(&queue->scheduler, 0);
}
@@ -2689,7 +2788,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
{
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_csg_slots_upd_ctx upd_ctx;
- struct panthor_group *group;
u32 suspended_slots;
u32 i;
@@ -2743,13 +2841,23 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
/* Terminate command timed out, but the soft-reset will
* automatically terminate all active groups, so let's
* force the state to halted here.
*/
- if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
- csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ if (group->state != PANTHOR_CS_GROUP_TERMINATED) {
+ group->state = PANTHOR_CS_GROUP_TERMINATED;
+
+ /* Reset the queue slots manually if the termination
+ * request failed.
+ */
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_reset_locked(ptdev, csg_id, i);
+ }
+ }
slot_mask &= ~BIT(csg_id);
}
}
@@ -2779,8 +2887,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
for (i = 0; i < sched->csg_slot_count; i++) {
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+ struct panthor_group *group = csg_slot->group;
- group = csg_slot->group;
if (!group)
continue;
@@ -2909,35 +3017,47 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
xa_unlock(&gpool->xa);
}
-static void group_sync_upd_work(struct work_struct *work)
+static bool queue_check_job_completion(struct panthor_queue *queue)
{
- struct panthor_group *group =
- container_of(work, struct panthor_group, sync_upd_work);
+ struct panthor_syncobj_64b *syncobj = NULL;
struct panthor_job *job, *job_tmp;
+ bool cookie, progress = false;
LIST_HEAD(done_jobs);
- u32 queue_idx;
- bool cookie;
cookie = dma_fence_begin_signalling();
- for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
- struct panthor_queue *queue = group->queues[queue_idx];
- struct panthor_syncobj_64b *syncobj;
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ if (!syncobj) {
+ struct panthor_group *group = job->group;
- if (!queue)
- continue;
+ syncobj = group->syncobjs->kmap +
+ (job->queue_idx * sizeof(*syncobj));
+ }
- syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
+ if (syncobj->seqno < job->done_fence->seqno)
+ break;
- spin_lock(&queue->fence_ctx.lock);
- list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
- if (syncobj->seqno < job->done_fence->seqno)
- break;
+ list_move_tail(&job->node, &done_jobs);
+ dma_fence_signal_locked(job->done_fence);
+ }
- list_move_tail(&job->node, &done_jobs);
- dma_fence_signal_locked(job->done_fence);
- }
- spin_unlock(&queue->fence_ctx.lock);
+ if (list_empty(&queue->fence_ctx.in_flight_jobs)) {
+ /* No jobs left: cancel the timer and reset the remaining time to its
+ * default so it can be restarted the next time queue_resume_timeout()
+ * is called.
+ */
+ queue_suspend_timeout_locked(queue);
+
+ /* If there's no job pending, we consider it progress to avoid a
+ * spurious timeout if the timeout handler and the sync update
+ * handler raced.
+ */
+ progress = true;
+ } else if (!list_empty(&done_jobs)) {
+ queue_reset_timeout_locked(queue);
+ progress = true;
}
+ spin_unlock(&queue->fence_ctx.lock);
dma_fence_end_signalling(cookie);
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
@@ -2947,6 +3067,27 @@ static void group_sync_upd_work(struct work_struct *work)
panthor_job_put(&job->base);
}
+ return progress;
+}
+
+static void group_sync_upd_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, sync_upd_work);
+ u32 queue_idx;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
+ for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
+ struct panthor_queue *queue = group->queues[queue_idx];
+
+ if (!queue)
+ continue;
+
+ queue_check_job_completion(queue);
+ }
+ dma_fence_end_signalling(cookie);
+
group_put(group);
}
@@ -3194,17 +3335,6 @@ queue_run_job(struct drm_sched_job *sched_job)
queue->iface.input->insert = job->ringbuf.end;
if (group->csg_id < 0) {
- /* If the queue is blocked, we want to keep the timeout running, so we
- * can detect unbounded waits and kill the group when that happens.
- * Otherwise, we suspend the timeout so the time we spend waiting for
- * a CSG slot is not counted.
- */
- if (!(group->blocked_queues & BIT(job->queue_idx)) &&
- !queue->timeout_suspended) {
- queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
- queue->timeout_suspended = true;
- }
-
group_schedule_locked(group, BIT(job->queue_idx));
} else {
gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
@@ -3213,6 +3343,7 @@ queue_run_job(struct drm_sched_job *sched_job)
pm_runtime_get(ptdev->base.dev);
sched->pm.has_ref = true;
}
+ queue_resume_timeout(queue);
panthor_devfreq_record_busy(sched->ptdev);
}
@@ -3262,7 +3393,6 @@ queue_timedout_job(struct drm_sched_job *sched_job)
mutex_unlock(&sched->lock);
queue_start(queue);
-
return DRM_GPU_SCHED_STAT_RESET;
}
@@ -3305,11 +3435,23 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
}
+static void queue_timeout_work(struct work_struct *work)
+{
+ struct panthor_queue *queue = container_of(work, struct panthor_queue,
+ timeout.work.work);
+ bool progress;
+
+ progress = queue_check_job_completion(queue);
+ if (!progress)
+ drm_sched_fault(&queue->scheduler);
+}
+
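
With the DRM scheduler's own timeout parked at MAX_SCHEDULE_TIMEOUT (set further down in sched_args), this delayed work is what actually enforces JOB_TIMEOUT_MS. A condensed reading of the resulting timeout path, as a comment sketch:

        /*
         * queue->timeout.work fires JOB_TIMEOUT_MS after the last (re)arm:
         *   queue_timeout_work()
         *     -> queue_check_job_completion()
         *          - signals the jobs whose syncobj seqno caught up
         *          - re-arms the timer if some jobs completed but others remain
         *          - suspends the timer if the queue drained completely
         *          - reports progress in both of those cases
         *     -> drm_sched_fault()
         *          - only when no progress was made: kicks the DRM scheduler
         *            so queue_timedout_job() runs
         */
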
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
- const struct drm_panthor_queue_create *args)
+ const struct drm_panthor_queue_create *args,
+ u64 drm_client_id, u32 gid, u32 qid)
{
- const struct drm_sched_init_args sched_args = {
+ struct drm_sched_init_args sched_args = {
.ops = &panthor_queue_sched_ops,
.submit_wq = group->ptdev->scheduler->wq,
.num_rqs = 1,
@@ -3320,9 +3462,8 @@ group_create_queue(struct panthor_group *group,
* their profiling status.
*/
.credit_limit = args->ringbuf_size / sizeof(u64),
- .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout = MAX_SCHEDULE_TIMEOUT,
.timeout_wq = group->ptdev->reset.wq,
- .name = "panthor-queue",
.dev = group->ptdev->base.dev,
};
struct drm_gpu_scheduler *drm_sched;
@@ -3343,6 +3484,8 @@ group_create_queue(struct panthor_group *group,
if (!queue)
return ERR_PTR(-ENOMEM);
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+ INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work);
queue->fence_ctx.id = dma_fence_context_alloc(1);
spin_lock_init(&queue->fence_ctx.lock);
INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
@@ -3397,12 +3540,23 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
+ /* Assign a unique name. */
+ queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
+ if (!queue->name) {
+ ret = -ENOMEM;
+ goto err_free_queue;
+ }
+
+ sched_args.name = queue->name;
+
ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
drm_sched = &queue->scheduler;
ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
+ if (ret)
+ goto err_free_queue;
return queue;
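
For reference, the naming scheme introduced above produces one scheduler name per queue (the concrete values here are made up):

        /* drm_client_id = 3, gid = 1, qid = 0 */
        queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", 3ULL, 1, 0);
        /* -> "panthor-queue-3-1-0", used as the DRM scheduler name for this queue */

Compared to the previous shared "panthor-queue" string, this makes the per-queue schedulers distinguishable in debug and trace output.
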
@@ -3446,7 +3600,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev,
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args)
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id)
{
struct panthor_device *ptdev = pfile->ptdev;
struct panthor_group_pool *gpool = pfile->groups;
@@ -3539,12 +3694,16 @@ int panthor_group_create(struct panthor_file *pfile,
memset(group->syncobjs->kmap, 0,
group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
for (i = 0; i < group_args->queues.count; i++) {
- group->queues[i] = group_create_queue(group, &queue_args[i]);
+ group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
if (IS_ERR(group->queues[i])) {
ret = PTR_ERR(group->queues[i]);
group->queues[i] = NULL;
- goto err_put_group;
+ goto err_erase_gid;
}
group->queue_count++;
@@ -3552,10 +3711,6 @@ int panthor_group_create(struct panthor_file *pfile,
group->idle_queues = GENMASK(group->queue_count - 1, 0);
- ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
- if (ret)
- goto err_put_group;
-
mutex_lock(&sched->reset.lock);
if (atomic_read(&sched->reset.in_progress)) {
panthor_group_stop(group);
@@ -3574,6 +3729,9 @@ int panthor_group_create(struct panthor_file *pfile,
return gid;
+err_erase_gid:
+ xa_erase(&gpool->xa, gid);
+
err_put_group:
group_put(group);
return ret;
@@ -3855,7 +4013,9 @@ void panthor_sched_unplug(struct panthor_device *ptdev)
{
struct panthor_scheduler *sched = ptdev->scheduler;
- cancel_delayed_work_sync(&sched->tick_work);
+ disable_delayed_work_sync(&sched->tick_work);
+ disable_work_sync(&sched->fw_events_work);
+ disable_work_sync(&sched->sync_upd_work);
mutex_lock(&sched->lock);
if (sched->pm.has_ref) {
@@ -3873,8 +4033,6 @@ static void panthor_sched_fini(struct drm_device *ddev, void *res)
if (!sched || !sched->csg_slot_count)
return;
- cancel_delayed_work_sync(&sched->tick_work);
-
if (sched->wq)
destroy_workqueue(sched->wq);