Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
-rw-r--r--   drivers/gpu/drm/i915/intel_runtime_pm.c   354
1 files changed, 121 insertions, 233 deletions
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 129746713d07..d11c2814b787 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,7 @@
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
 
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -50,184 +51,46 @@
  * present for a given platform.
  */
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
+static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
 {
-        unsigned long entries[STACKDEPTH];
-        unsigned int n;
-
-        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
-        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+        return container_of(rpm, struct drm_i915_private, runtime_pm);
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
 static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
-        spin_lock_init(&rpm->debug.lock);
-        stack_depot_init();
+        if (!rpm->debug.class)
+                ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
+                                     "intel_runtime_pm");
 }
 
-static noinline depot_stack_handle_t
+static intel_wakeref_t
 track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
-        depot_stack_handle_t stack, *stacks;
-        unsigned long flags;
-
-        if (rpm->no_wakeref_tracking)
-                return -1;
-
-        stack = __save_depot_stack();
-        if (!stack)
-                return -1;
-
-        spin_lock_irqsave(&rpm->debug.lock, flags);
-
-        if (!rpm->debug.count)
-                rpm->debug.last_acquire = stack;
-
-        stacks = krealloc(rpm->debug.owners,
-                          (rpm->debug.count + 1) * sizeof(*stacks),
-                          GFP_NOWAIT | __GFP_NOWARN);
-        if (stacks) {
-                stacks[rpm->debug.count++] = stack;
-                rpm->debug.owners = stacks;
-        } else {
-                stack = -1;
-        }
-
-        spin_unlock_irqrestore(&rpm->debug.lock, flags);
+        if (!rpm->available || rpm->no_wakeref_tracking)
+                return INTEL_WAKEREF_DEF;
 
-        return stack;
+        return intel_ref_tracker_alloc(&rpm->debug);
 }
 
 static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
-                                             depot_stack_handle_t stack)
-{
-        struct drm_i915_private *i915 = container_of(rpm,
-                                                     struct drm_i915_private,
-                                                     runtime_pm);
-        unsigned long flags, n;
-        bool found = false;
-
-        if (unlikely(stack == -1))
-                return;
-
-        spin_lock_irqsave(&rpm->debug.lock, flags);
-        for (n = rpm->debug.count; n--; ) {
-                if (rpm->debug.owners[n] == stack) {
-                        memmove(rpm->debug.owners + n,
-                                rpm->debug.owners + n + 1,
-                                (--rpm->debug.count - n) * sizeof(stack));
-                        found = true;
-                        break;
-                }
-        }
-        spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-        if (drm_WARN(&i915->drm, !found,
-                     "Unmatched wakeref (tracking %lu), count %u\n",
-                     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
-                char *buf;
-
-                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
-                if (!buf)
-                        return;
-
-                stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
-                stack = READ_ONCE(rpm->debug.last_release);
-                if (stack) {
-                        stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
-                }
-
-                kfree(buf);
-        }
-}
-
-static int cmphandle(const void *_a, const void *_b)
-{
-        const depot_stack_handle_t * const a = _a, * const b = _b;
-
-        if (*a < *b)
-                return -1;
-        else if (*a > *b)
-                return 1;
-        else
-                return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
-                                 const struct intel_runtime_pm_debug *dbg)
+                                             intel_wakeref_t wakeref)
 {
-        unsigned long i;
-        char *buf;
-
-        buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
-        if (!buf)
+        if (!rpm->available || rpm->no_wakeref_tracking)
                 return;
 
-        if (dbg->last_acquire) {
-                stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
-                drm_printf(p, "Wakeref last acquired:\n%s", buf);
-        }
-
-        if (dbg->last_release) {
-                stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
-                drm_printf(p, "Wakeref last released:\n%s", buf);
-        }
-
-        drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
-        sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
-        for (i = 0; i < dbg->count; i++) {
-                depot_stack_handle_t stack = dbg->owners[i];
-                unsigned long rep;
-
-                rep = 1;
-                while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
-                        rep++, i++;
-                stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-                drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
-        }
-
-        kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
-                       struct intel_runtime_pm_debug *saved)
-{
-        *saved = *debug;
-
-        debug->owners = NULL;
-        debug->count = 0;
-        debug->last_release = __save_depot_stack();
+        intel_ref_tracker_free(&rpm->debug, wakeref);
 }
 
-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
 {
-        if (debug->count) {
-                struct drm_printer p = drm_debug_printer("i915");
-
-                __print_intel_runtime_pm_wakeref(&p, debug);
-        }
-
-        kfree(debug->owners);
+        ref_tracker_dir_exit(&rpm->debug);
 }
 
 static noinline void
 __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
 {
-        struct intel_runtime_pm_debug dbg = {};
         unsigned long flags;
 
         if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -235,60 +98,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
                                          flags))
                 return;
 
-        __untrack_all_wakerefs(&rpm->debug, &dbg);
-        spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-        dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
-        struct intel_runtime_pm_debug dbg = {};
-        unsigned long flags;
-
-        spin_lock_irqsave(&rpm->debug.lock, flags);
-        __untrack_all_wakerefs(&rpm->debug, &dbg);
+        ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
         spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-        dump_and_free_wakeref_tracking(&dbg);
 }
 
 void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                     struct drm_printer *p)
 {
-        struct intel_runtime_pm_debug dbg = {};
-
-        do {
-                unsigned long alloc = dbg.count;
-                depot_stack_handle_t *s;
-
-                spin_lock_irq(&rpm->debug.lock);
-                dbg.count = rpm->debug.count;
-                if (dbg.count <= alloc) {
-                        memcpy(dbg.owners,
-                               rpm->debug.owners,
-                               dbg.count * sizeof(*s));
-                }
-                dbg.last_acquire = rpm->debug.last_acquire;
-                dbg.last_release = rpm->debug.last_release;
-                spin_unlock_irq(&rpm->debug.lock);
-                if (dbg.count <= alloc)
-                        break;
-
-                s = krealloc(dbg.owners,
-                             dbg.count * sizeof(*s),
-                             GFP_NOWAIT | __GFP_NOWARN);
-                if (!s)
-                        goto out;
-
-                dbg.owners = s;
-        } while (1);
-
-        __print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
-        kfree(dbg.owners);
+        intel_ref_tracker_show(&rpm->debug, p);
 }
 
 #else
@@ -297,14 +114,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
 }
 
-static depot_stack_handle_t
+static intel_wakeref_t
 track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
-        return -1;
+        return INTEL_WAKEREF_DEF;
 }
 
 static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
-                                             intel_wakeref_t wref)
+                                             intel_wakeref_t wakeref)
 {
 }
 
@@ -349,9 +166,7 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
 static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
                                               bool wakelock)
 {
-        struct drm_i915_private *i915 = container_of(rpm,
-                                                     struct drm_i915_private,
-                                                     runtime_pm);
+        struct drm_i915_private *i915 = rpm_to_i915(rpm);
         int ret;
 
         ret = pm_runtime_get_sync(rpm->kdev);
@@ -363,6 +178,82 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
         return track_intel_runtime_pm_wakeref(rpm);
 }
 
+static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm)
+{
+        struct drm_i915_private *i915 = to_i915(drm);
+
+        return &i915->runtime_pm;
+}
+
+static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm)
+{
+        return intel_runtime_pm_get(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm)
+{
+        return intel_runtime_pm_get_raw(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm)
+{
+        return intel_runtime_pm_get_if_in_use(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm)
+{
+        return intel_runtime_pm_get_noresume(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+        intel_runtime_pm_put(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+        intel_runtime_pm_put_raw(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_unchecked(const struct drm_device *drm)
+{
+        intel_runtime_pm_put_unchecked(drm_to_rpm(drm));
+}
+
+static bool i915_display_rpm_suspended(const struct drm_device *drm)
+{
+        return intel_runtime_pm_suspended(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_held(const struct drm_device *drm)
+{
+        assert_rpm_wakelock_held(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_block(const struct drm_device *drm)
+{
+        disable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_unblock(const struct drm_device *drm)
+{
+        enable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+const struct intel_display_rpm_interface i915_display_rpm_interface = {
+        .get = i915_display_rpm_get,
+        .get_raw = i915_display_rpm_get_raw,
+        .get_if_in_use = i915_display_rpm_get_if_in_use,
+        .get_noresume = i915_display_rpm_get_noresume,
+        .put = i915_display_rpm_put,
+        .put_raw = i915_display_rpm_put_raw,
+        .put_unchecked = i915_display_rpm_put_unchecked,
+        .suspended = i915_display_rpm_suspended,
+        .assert_held = i915_display_rpm_assert_held,
+        .assert_block = i915_display_rpm_assert_block,
+        .assert_unblock = i915_display_rpm_assert_unblock
+};
+
 /**
  * intel_runtime_pm_get_raw - grab a raw runtime pm reference
  * @rpm: the intel_runtime_pm structure
@@ -434,8 +325,11 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm
         * function, since the power state is undefined. This applies
         * atm to the late/early system suspend/resume handlers.
         */
-        if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
-                return 0;
+        if ((ignore_usecount &&
+             pm_runtime_get_if_active(rpm->kdev) <= 0) ||
+            (!ignore_usecount &&
+             pm_runtime_get_if_in_use(rpm->kdev) <= 0))
+                return NULL;
         }
 
         intel_runtime_pm_acquire(rpm, true);
@@ -457,15 +351,11 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @rpm: the intel_runtime_pm structure
  *
- * This function grabs a device-level runtime pm reference (mostly used for GEM
- * code to ensure the GTT or GT is on).
+ * This function grabs a device-level runtime pm reference.
  *
- * It will _not_ power up the device but instead only check that it's powered
- * on. Therefore it is only valid to call this functions from contexts where
- * the device is known to be powered up and where trying to power it up would
- * result in hilarity and deadlocks. That pretty much means only the system
- * suspend/resume code where this is used to grab runtime pm references for
- * delayed setup down in work items.
+ * It will _not_ resume the device but instead only get an extra wakeref.
+ * Therefore it is only valid to call this functions from contexts where
+ * the device is known to be active and with another wakeref previously hold.
  *
  * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
@@ -474,7 +364,7 @@ intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
  */
 intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
 {
-        assert_rpm_wakelock_held(rpm);
+        assert_rpm_raw_wakeref_held(rpm);
         pm_runtime_get_noresume(rpm->kdev);
 
         intel_runtime_pm_acquire(rpm, true);
@@ -525,7 +415,7 @@ intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
  */
 void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
 {
-        __intel_runtime_pm_put(rpm, -1, true);
+        __intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -556,9 +446,7 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
  */
 void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
 {
-        struct drm_i915_private *i915 = container_of(rpm,
-                                                     struct drm_i915_private,
-                                                     runtime_pm);
+        struct drm_i915_private *i915 = rpm_to_i915(rpm);
         struct device *kdev = rpm->kdev;
 
         /*
@@ -566,7 +454,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
         * leave the device suspended skipping the driver's suspend handlers
         * if the device was already runtime suspended. This is needed due to
         * the difference in our runtime and system suspend sequence and
-        * becaue the HDA driver may require us to enable the audio power
+        * because the HDA driver may require us to enable the audio power
         * domain during system suspend.
         */
         dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
@@ -611,9 +499,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
 
 void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
 {
-        struct drm_i915_private *i915 = container_of(rpm,
-                                                     struct drm_i915_private,
-                                                     runtime_pm);
+        struct drm_i915_private *i915 = rpm_to_i915(rpm);
         struct device *kdev = rpm->kdev;
 
         /* Transfer rpm ownership back to core */
@@ -628,9 +514,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
 
 void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
 {
-        struct drm_i915_private *i915 = container_of(rpm,
-                                                     struct drm_i915_private,
-                                                     runtime_pm);
+        struct drm_i915_private *i915 = rpm_to_i915(rpm);
         int count = atomic_read(&rpm->wakeref_count);
 
         intel_wakeref_auto_fini(&rpm->userfault_wakeref);
@@ -639,22 +523,26 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
                  "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
                  intel_rpm_raw_wakeref_count(count),
                  intel_rpm_wakelock_count(count));
+}
 
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+        intel_runtime_pm_driver_release(rpm);
         untrack_all_intel_runtime_pm_wakerefs(rpm);
 }
 
 void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
 {
-        struct drm_i915_private *i915 =
-                        container_of(rpm, struct drm_i915_private, runtime_pm);
+        struct drm_i915_private *i915 = rpm_to_i915(rpm);
         struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
         struct device *kdev = &pdev->dev;
 
         rpm->kdev = kdev;
         rpm->available = HAS_RUNTIME_PM(i915);
+        atomic_set(&rpm->wakeref_count, 0);
 
         init_intel_runtime_pm_wakeref(rpm);
         INIT_LIST_HEAD(&rpm->lmem_userfault_list);
         spin_lock_init(&rpm->lmem_userfault_lock);
-        intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
+        intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
 }
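
Editor's note, not part of the patch: the diff replaces i915's hand-rolled stackdepot wakeref bookkeeping with the kernel's generic ref_tracker library (rpm->debug becomes a ref_tracker directory, and intel_wakeref_t cookies come from intel_ref_tracker_alloc()/intel_ref_tracker_free()), and it exports an i915_display_rpm_interface vtable so display code can reach runtime PM through a drm_device pointer. The minimal sketch below is a hypothetical caller (example_poke_hw() is an invented name, and the in-tree i915 headers are assumed) illustrating the get/put pairing that the kerneldoc in the diff requires: every successful *_get() must be matched by a *_put() that hands back the same intel_wakeref_t cookie.

/*
 * Hypothetical example, not from the patch. With
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM the cookie is a struct ref_tracker
 * pointer allocated by the ref_tracker directory; otherwise it is the
 * constant INTEL_WAKEREF_DEF.
 */
#include "i915_drv.h"
#include "intel_runtime_pm.h"

static void example_poke_hw(struct drm_i915_private *i915)
{
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;

        /* Wake the device (resuming it if needed); the cookie records this owner. */
        wakeref = intel_runtime_pm_get(rpm);

        /* ... register access that requires the device to be awake ... */

        /* Release with the same cookie so the tracked reference is dropped. */
        intel_runtime_pm_put(rpm, wakeref);
}

The conditional variant follows the same contract, except that intel_runtime_pm_get_if_in_use() can return a NULL cookie when the device is not already awake (see the pm_runtime_get_if_in_use() path in the hunk above), so callers must check the cookie before touching hardware.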
