Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gt_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 86
1 file changed, 56 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c0fa41e4c803..c7f59d60fac6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -3,18 +3,23 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/string_helpers.h>
#include <linux/suspend.h>
+#include "display/intel_display_power.h"
+
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
@@ -25,19 +30,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
+ intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@@ -66,6 +72,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
@@ -80,12 +87,12 @@ static int __gt_unpark(struct intel_wakeref *wf)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ gt->awake = intel_display_power_get(display, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
- i915_pmu_gt_unparked(i915);
+ i915_pmu_gt_unparked(gt);
intel_guc_busyness_unpark(gt);
intel_gt_unpark_requests(gt);
@@ -99,6 +106,7 @@ static int __gt_park(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = i915->display;
GT_TRACE(gt, "\n");
@@ -107,7 +115,7 @@ static int __gt_park(struct intel_wakeref *wf)
intel_guc_busyness_park(gt);
i915_vma_parked(gt);
- i915_pmu_gt_parked(i915);
+ i915_pmu_gt_parked(gt);
intel_rps_park(&gt->rps);
intel_rc6_park(&gt->rc6);
@@ -116,7 +124,7 @@ static int __gt_park(struct intel_wakeref *wf)
/* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
- intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ intel_display_power_put_async(display, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
@@ -128,7 +136,14 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ /*
+ * We access the runtime_pm structure via gt->i915 here rather than
+ * gt->uncore as we do elsewhere in the file because gt->uncore is not
+ * yet initialized for all tiles at this point in the driver startup.
+ * runtime_pm is per-device rather than per-tile, so this is still the
+ * correct structure.
+ */
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -145,10 +160,10 @@ void intel_gt_pm_init(struct intel_gt *gt)
static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
return false;
- return __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ return intel_gt_reset_all_engines(gt) == 0;
}
static void gt_sanitize(struct intel_gt *gt, bool force)
@@ -157,7 +172,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", yesno(force));
+ GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -174,15 +189,16 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
if (intel_gt_is_wedged(gt))
intel_gt_unset_wedged(gt);
- for_each_engine(engine, gt, id)
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
+ for_each_engine(engine, gt, id) {
if (engine->reset.prepare)
engine->reset.prepare(engine);
- intel_uc_reset_prepare(&gt->uc);
-
- for_each_engine(engine, gt, id)
if (engine->sanitize)
engine->sanitize(engine);
+ }
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
@@ -206,10 +222,26 @@ void intel_gt_pm_fini(struct intel_gt *gt)
intel_rc6_fini(&gt->rc6);
}
+void intel_gt_resume_early(struct intel_gt *gt)
+{
+ /*
+ * Sanitize steer semaphores during driver resume. This is necessary
+ * to address observed cases of steer semaphores being
+ * held after a suspend operation. Confirmation from the hardware team
+ * assures the safety of this operation, as no lock acquisitions
+ * by other agents occur during driver load/resume process.
+ */
+ intel_gt_mcr_lock_sanitize(gt);
+
+ intel_uncore_resume_early(gt->uncore);
+ intel_gt_check_and_clear_faults(gt);
+}
+
int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@@ -226,7 +258,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@@ -238,8 +270,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- i915_probe_error(gt->i915,
- "Failed to initialize GPU, declaring it wedged!\n");
+ gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -256,9 +287,8 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- drm_err(&gt->i915->drm,
- "Failed to restart %s (%d)\n",
- engine->name, err);
+ gt_err(gt, "Failed to restart %s (%d)\n",
+ engine->name, err);
goto err_wedged;
}
}
@@ -267,13 +297,12 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uc_resume(&gt->uc);
- intel_pxp_resume(&gt->pxp);
-
user_forcewake(gt, false);
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
+ intel_gt_bind_context_set_ready(gt);
return err;
err_wedged:
@@ -300,10 +329,9 @@ static void wait_for_suspend(struct intel_gt *gt)
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
+ intel_gt_bind_context_set_unready(gt);
user_forcewake(gt, true);
wait_for_suspend(gt);
-
- intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@@ -328,7 +356,6 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
- intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@@ -356,7 +383,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
- intel_pxp_runtime_suspend(&gt->pxp);
+ intel_gt_bind_context_set_unready(gt);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -374,8 +401,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
- intel_pxp_runtime_resume(&gt->pxp);
-
+ intel_gt_bind_context_set_ready(gt);
return 0;
}
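
The hunks above convert the intel_gt_pm_get()/intel_gt_pm_put() calls in this file to the cookie-returning form: the intel_wakeref_t obtained from the get call is kept by the caller and handed back on put. Below is a minimal caller sketch of that pattern using the signatures shown in the diff; the helper do_work_with_gt_awake() and its body are hypothetical and not part of this patch.

/*
 * Hypothetical caller sketch (not from this patch): keep the cookie
 * returned by intel_gt_pm_get() and hand the same cookie back to
 * intel_gt_pm_put() when done.
 */
static void do_work_with_gt_awake(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	wakeref = intel_gt_pm_get(gt);	/* GT held unparked from here */

	/* ... touch GT hardware while the wakeref is held ... */

	intel_gt_pm_put(gt, wakeref);	/* release the same cookie */
}

Returning and passing back the cookie, rather than relying on an implicit reference count alone, presumably lets the driver's wakeref-tracking debug machinery pair each acquire with its release at the call site.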