path: root/drivers/gpu/drm/i915/gt/intel_engine_pm.c
author	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-16 13:09:59 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-16 18:02:06 +0100
commit	6c69a45445af924378371ce3ae3fbe20c4657b94 (patch)
tree	5426a2907e0afa73ecfa02e9a5a5027795f7bc31	/drivers/gpu/drm/i915/gt/intel_engine_pm.c
parent	f789fbb1eb16de8db1e2eb7b6648f85464326195 (diff)
drm/i915/gt: Mark context->active_count as protected by timeline->mutex
We use timeline->mutex to protect modifications to context->active_count, and the associated enable/disable callbacks. Due to complications with the engine-pm barrier, there is a path where we used a "superlock" to provide serialised protection, and so we could not unconditionally assert with lockdep that timeline->mutex was always held. However, we can mark the mutex as taken (noting that we may be nested underneath ourselves), which means we can be reassured the right timeline->mutex is always treated as held and let lockdep roam free.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-1-chris@chris-wilson.co.uk
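For context (not part of this diff), the lockdep assertions that these annotations are meant to satisfy sit on the context enter/exit path in intel_context.h. A rough sketch of that consumer side, under the assumption that it matches the i915 code of this era, looks like:

	/* Sketch of the enter/exit helpers that assert timeline->mutex is held. */
	static inline void intel_context_enter(struct intel_context *ce)
	{
		/* active_count may only be modified under timeline->mutex */
		lockdep_assert_held(&ce->timeline->mutex);

		if (!ce->active_count++)
			ce->ops->enter(ce);
	}

	static inline void intel_context_exit(struct intel_context *ce)
	{
		lockdep_assert_held(&ce->timeline->mutex);

		GEM_BUG_ON(!ce->active_count);
		if (!--ce->active_count)
			ce->ops->exit(ce);
	}

Per the commit message, the engine parking path reaches these helpers without literally taking the kernel context's timeline->mutex (the engine wakeref provides the exclusion), so marking the mutex as held keeps the assertion honest rather than removing it.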
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_engine_pm.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_engine_pm.c	14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index f3f0109f9e22..5f03f7dcad72 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -37,6 +37,16 @@ static int __engine_unpark(struct intel_wakeref *wf)
return 0;
}
+static inline void __timeline_mark_lock(struct intel_context *ce)
+{
+ mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce)
+{
+ mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+}
+
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
@@ -61,6 +71,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* retiring the last request, thus all rings should be empty and
* all timelines idle.
*/
+ __timeline_mark_lock(engine->kernel_context);
+
rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
@@ -80,6 +92,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
__intel_wakeref_defer_park(&engine->wakeref);
__i915_request_queue(rq, NULL);
+ __timeline_mark_unlock(engine->kernel_context);
+
return false;
}
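A brief design note: the __timeline_mark_lock()/__timeline_mark_unlock() pair is lockdep-only bookkeeping and never takes the mutex, so it compiles away when lockdep is disabled. Paraphrased (not verbatim) from <linux/lockdep.h> of this era, the underlying hooks that mutex_acquire()/mutex_release() expand to look roughly like:

	#ifdef CONFIG_LOCKDEP
	extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
				 int trylock, int read, int check,
				 struct lockdep_map *nest_lock, unsigned long ip);
	extern void lock_release(struct lockdep_map *lock, int nested,
				 unsigned long ip);
	#else
	/* With lockdep disabled the annotations are no-ops. */
	# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
	# define lock_release(l, n, i)			do { } while (0)
	#endif

The subclass of 2 passed by __timeline_mark_lock() reflects the "nested underneath ourselves" note in the commit message: using a non-default subclass keeps lockdep from flagging the annotation as a recursive acquisition when the real timeline->mutex is already held further up the stack.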