author     Matthew Auld <matthew.auld@intel.com>      2022-08-19 13:39:04 +0100
committer  Lucas De Marchi <lucas.demarchi@intel.com> 2022-08-20 09:41:56 -0700
commit     54c204c522fd2a887b52c7672b9238903ba59a8b (patch)
tree       95f6410be8cdafa00c21bae44916b91673361deb /drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
parent     6a079903847cce1dd06345127d2a32f26d2cd9c6 (diff)
Revert "drm/i915/guc: Add delay to disable scheduling after pin count goes to zero"
This reverts commit 6a079903847cce1dd06345127d2a32f26d2cd9c6. Everything in CI using GuC is now timing out [1], and this change is killing the machine (perhaps a deadlock?). CI was recently on fire due to some changes coming in from -rc1, so the pre-merge CI results for this series were likely invalid. For now just revert, unless the GuC experts already have a fix in mind.

[1] https://intel-gfx-ci.01.org/tree/drm-tip/index.html?

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220819123904.913750-1-matthew.auld@intel.com
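For context on what is being backed out: the reverted commit deferred the expensive schedule-disable handshake with the GuC by arming a delayed work item when the pin count hit zero, and cancelling that work if the context was re-pinned within the window. Below is a minimal, self-contained sketch of that debounce pattern using the generic kernel workqueue API. The struct and demo_* names are hypothetical stand-ins, not the i915 ones; the workqueue calls and the 34 ms delay mirror the removed code visible in the hunks below.

    #include <linux/container_of.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    /* Hypothetical context; stands in for the intel_context guc_state. */
    struct demo_ctx {
            spinlock_t lock;
            bool sched_enabled;
            struct delayed_work disable_work;
    };

    #define DEMO_SCHED_DISABLE_DELAY_MS 34  /* value used by the reverted commit */

    static void demo_sched_disable_fn(struct work_struct *wrk)
    {
            struct demo_ctx *ctx =
                    container_of(wrk, struct demo_ctx, disable_work.work);
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            /* The costly H2G schedule-disable request would be issued here. */
            ctx->sched_enabled = false;
            spin_unlock_irqrestore(&ctx->lock, flags);
    }

    static void demo_ctx_init(struct demo_ctx *ctx)
    {
            spin_lock_init(&ctx->lock);
            ctx->sched_enabled = true;
            INIT_DELAYED_WORK(&ctx->disable_work, demo_sched_disable_fn);
    }

    /* Last pin dropped: defer the disable instead of doing it synchronously. */
    static void demo_ctx_unpin(struct demo_ctx *ctx)
    {
            mod_delayed_work(system_unbound_wq, &ctx->disable_work,
                             msecs_to_jiffies(DEMO_SCHED_DISABLE_DELAY_MS));
    }

    /* Re-pinned within the window: a still-pending disable is simply dropped. */
    static void demo_ctx_repin(struct demo_ctx *ctx)
    {
            cancel_delayed_work(&ctx->disable_work);
    }

The revert removes this window entirely, so an unpin once again leads straight to the immediate schedule-disable path restored in the hunks below.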
Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c')
-rw-r--r--   drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c   154
1 file changed, 25 insertions(+), 129 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a0cebb4590e9..0d56b615bf78 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -65,13 +65,7 @@
* corresponding G2H returns indicating the scheduling disable operation has
* completed it is safe to unpin the context. While a disable is in flight it
* isn't safe to resubmit the context so a fence is used to stall all future
- * requests of that context until the G2H is returned. Because this interaction
- * with the GuC takes a non-zero amount of time we delay the disabling of
- * scheduling after the pin count goes to zero by a configurable period of time
- * (see SCHED_DISABLE_DELAY_MS). The thought is this gives the user a window of
- * time to resubmit something on the context before doing this costly operation.
- * This delay is only done if the context isn't closed and the guc_id usage is
- * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
+ * requests of that context until the G2H is returned.
*
* Context deregistration:
* Before a context can be destroyed or if we steal its guc_id we must
@@ -1995,9 +1989,6 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
if (unlikely(ret < 0))
return ret;
- if (!intel_context_is_parent(ce))
- ++guc->submission_state.guc_ids_in_use;
-
ce->guc_id.id = ret;
return 0;
}
@@ -2007,16 +1998,14 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_child(ce));
if (!context_guc_id_invalid(ce)) {
- if (intel_context_is_parent(ce)) {
+ if (intel_context_is_parent(ce))
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
ce->guc_id.id,
order_base_2(ce->parallel.number_children
+ 1));
- } else {
- --guc->submission_state.guc_ids_in_use;
+ else
ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id);
- }
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
@@ -3004,98 +2993,41 @@ guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
}
}
-static void guc_context_sched_disable(struct intel_context *ce);
-
-static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
- unsigned long flags)
- __releases(ce->guc_state.lock)
+static void guc_context_sched_disable(struct intel_context *ce)
{
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
+ u16 guc_id;
- lockdep_assert_held(&ce->guc_state.lock);
-
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-
- with_intel_runtime_pm(runtime_pm, wakeref)
- guc_context_sched_disable(ce);
-}
-
-static bool bypass_sched_disable(struct intel_guc *guc,
- struct intel_context *ce)
-{
- lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
- if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
- !ctx_id_mapped(guc, ce->guc_id.id)) {
- clr_context_enabled(ce);
- return true;
- }
-
- return !context_enabled(ce);
-}
-
-static void __delay_sched_disable(struct work_struct *wrk)
-{
- struct intel_context *ce =
- container_of(wrk, typeof(*ce), guc_state.sched_disable_delay.work);
- struct intel_guc *guc = ce_to_guc(ce);
- unsigned long flags;
-
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (bypass_sched_disable(guc, ce)) {
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- intel_context_sched_disable_unpin(ce);
- } else {
- do_sched_disable(guc, ce, flags);
- }
-}
-
-static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
-{
- /*
- * parent contexts are perma-pinned, if we are unpinning do schedule
- * disable immediately.
- */
- if (intel_context_is_parent(ce))
- return true;
-
/*
- * If we are beyond the threshold for avail guc_ids, do schedule disable immediately.
+ * We have to check if the context has been disabled by another thread,
+	 * check if submission has been disabled to seal a race with reset and
+	 * finally check if any more requests have been committed to the
+	 * context ensuring that a request doesn't slip through the
+ * 'context_pending_disable' fence.
*/
- return guc->submission_state.guc_ids_in_use >
- guc->submission_state.sched_disable_gucid_threshold;
-}
-
-static void guc_context_sched_disable(struct intel_context *ce)
-{
- struct intel_guc *guc = ce_to_guc(ce);
- u64 delay = guc->submission_state.sched_disable_delay_ms;
- unsigned long flags;
-
- spin_lock_irqsave(&ce->guc_state.lock, flags);
-
- if (bypass_sched_disable(guc, ce)) {
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- intel_context_sched_disable_unpin(ce);
- } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
- delay) {
+ if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
+ context_has_committed_requests(ce))) {
+ clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- mod_delayed_work(system_unbound_wq,
- &ce->guc_state.sched_disable_delay,
- msecs_to_jiffies(delay));
- } else {
- do_sched_disable(guc, ce, flags);
+ goto unpin;
}
-}
+ guc_id = prep_context_pending_disable(ce);
-static void guc_context_close(struct intel_context *ce)
-{
- if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
- cancel_delayed_work(&ce->guc_state.sched_disable_delay))
- __delay_sched_disable(&ce->guc_state.sched_disable_delay.work);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_disable(guc, ce, guc_id);
+
+ return;
+unpin:
+ intel_context_sched_disable_unpin(ce);
}
static inline void guc_lrc_desc_unpin(struct intel_context *ce)
@@ -3414,8 +3346,6 @@ static void remove_from_context(struct i915_request *rq)
static const struct intel_context_ops guc_context_ops = {
.alloc = guc_context_alloc,
- .close = guc_context_close,
-
.pre_pin = guc_context_pre_pin,
.pin = guc_context_pin,
.unpin = guc_context_unpin,
@@ -3498,10 +3428,6 @@ static void guc_context_init(struct intel_context *ce)
rcu_read_unlock();
ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
-
- INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay,
- __delay_sched_disable);
-
set_bit(CONTEXT_GUC_INIT, &ce->flags);
}
@@ -3539,9 +3465,6 @@ static int guc_request_alloc(struct i915_request *rq)
if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
guc_context_init(ce);
- if (cancel_delayed_work(&ce->guc_state.sched_disable_delay))
- intel_context_sched_disable_unpin(ce);
-
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned thrashing the
@@ -3672,8 +3595,6 @@ static int guc_virtual_context_alloc(struct intel_context *ce)
static const struct intel_context_ops virtual_guc_context_ops = {
.alloc = guc_virtual_context_alloc,
- .close = guc_context_close,
-
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
.unpin = guc_virtual_context_unpin,
@@ -3763,8 +3684,6 @@ static void guc_child_context_destroy(struct kref *kref)
static const struct intel_context_ops virtual_parent_context_ops = {
.alloc = guc_virtual_context_alloc,
- .close = guc_context_close,
-
.pre_pin = guc_context_pre_pin,
.pin = guc_parent_context_pin,
.unpin = guc_parent_context_unpin,
@@ -4295,26 +4214,6 @@ static bool __guc_submission_selected(struct intel_guc *guc)
return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}
-int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
-{
- return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
-}
-
-/*
- * This default value of 33 milliseconds (+1 millisecond round-up) ensures 30fps or higher
- * workloads are able to enjoy the latency reduction when delaying the schedule-disable
- * operation. This matches the 30fps game-render + encode (real world) workload this
- * knob was tested against.
- */
-#define SCHED_DISABLE_DELAY_MS 34
-
-/*
- * A threshold of 75% is a reasonable starting point considering that real world apps
- * generally don't get anywhere near this.
- */
-#define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
- (((intel_guc_sched_disable_gucid_threshold_max(__guc)) * 3) / 4)
-
void intel_guc_submission_init_early(struct intel_guc *guc)
{
xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
@@ -4331,10 +4230,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
- guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
- guc->submission_state.sched_disable_gucid_threshold =
- NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
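As a closing note on the heuristics removed above: the delay was only applied while guc_id usage stayed below a 75% threshold of the single-LRC id space; beyond that point (or for perma-pinned parent contexts) scheduling was disabled immediately so ids stayed reclaimable. A minimal sketch of that arithmetic, assuming a simplified, hypothetical state struct in place of intel_guc's submission_state:

    #include <linux/types.h>

    /* Hypothetical stand-in for the relevant intel_guc submission state. */
    struct demo_guc_state {
            u32 num_guc_ids;     /* total id space (GUC_MAX_CONTEXT_ID in i915) */
            u32 multi_lrc_ids;   /* slice reserved for parallel (multi-LRC) contexts */
            u32 guc_ids_in_use;  /* single-LRC ids currently handed out */
    };

    /* Mirrors the removed intel_guc_sched_disable_gucid_threshold_max(). */
    static u32 demo_threshold_max(const struct demo_guc_state *s)
    {
            return s->num_guc_ids - s->multi_lrc_ids;
    }

    /* Default threshold: 75% of the single-LRC id space. */
    static u32 demo_default_threshold(const struct demo_guc_state *s)
    {
            return demo_threshold_max(s) * 3 / 4;
    }

    /* Under id pressure, skip the delay and disable scheduling right away. */
    static bool demo_guc_id_pressure(const struct demo_guc_state *s)
    {
            return s->guc_ids_in_use > demo_default_threshold(s);
    }

With the revert applied, none of this bookkeeping remains: guc_ids_in_use, the threshold, and the delay knob are all gone from intel_guc_submission_init_early().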