author    Dave Airlie <airlied@redhat.com>  2023-03-22 06:49:01 +1000
committer Dave Airlie <airlied@redhat.com>  2023-03-22 10:25:12 +1000
commit    d240daa2c40d384aa01d68163ce5c12625b92d10 (patch)
tree      9a2d858dd0b8828334c06f77d39847dd9a6d078f /drivers
parent    c6265f5c2f502e442c4f339f121bedbc990c12e7 (diff)
parent    d2a9692ad4295e227e3352fdbf14b8491b01e1c9 (diff)
Merge tag 'drm-intel-gt-next-2023-03-16' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Driver Changes:

- Fix issue #6333: "list_add corruption" and full system lockup from performance monitoring (Janusz)
- Give the punit time to settle before fatally failing (Aravind, Chris)
- Don't use stolen memory or BAR for ring buffers on LLC platforms (John)
- Add missing ecodes and correct timeline seqno on GuC error captures (John)
- Make sure DSM size has correct 1MiB granularity on Gen12+ (Nirmoy, Lucas)
- Fix potential SSEU max_subslices array-index-out-of-bounds access on Gen11 (Andrea)
- Whitelist COMMON_SLICE_CHICKEN3 for UMD access on Gen12+ (Matt R.)
- Apply Wa_1408615072/Wa_1407596294 correctly on Gen11 (Matt R)
- Apply LNCF/LBCF workarounds correctly on XeHP SDV/PVC/DG2 (Matt R)
- Implement Wa_1606376872 for Xe_LP (Gustavo)
- Consider GSI offset when doing MCR lookups on Meteorlake+ (Matt R.)
- Add engine TLB invalidation for Meteorlake (Matt R.)
- Fix GSC Driver-FLR completion on Meteorlake (Alan)
- Fix GSC races on driver load/unload on Meteorlake+ (Daniele)
- Disable MC6 for MTL A step (Badal)
- Consolidate TLB invalidation flow (Tvrtko)
- Improve GuC/HuC debug messages (Michal Wa., John)
- Move fd_install after last use of fence (Rob)
- Initialize the obj flags for shmem objects (Aravind)
- Fix missing debug object activation (Nirmoy)
- Probe lmem before the stolen portion (Matt A)
- Improve clean up of GuC busyness stats worker (John)
- Fix missing return code checks in GuC submission init (John)
- Annotate two more workaround/tuning registers as MCR on PVC (Matt R)
- Fix GEN8_MISCCPCTL definition and remove unused INF_UNIT_LEVEL_CLKGATE (Lucas)
- Use sysfs_emit() and sysfs_emit_at() (Nirmoy)
- Make kobj_type structures constant (Thomas W.)
- Make kobj attributes const on gt/ (Jani)
- Remove the unused virtualized start hack on buddy allocator (Matt A)
- Remove redundant check for DG1 (Lucas)
- Move DG2 tuning to the right function (Lucas)
- Rename dev_priv to i915 for private data naming consistency in gt/ (Andi)
- Remove unnecessary whitelisting of CS_CTX_TIMESTAMP on Xe_HP platforms (Matt R.)
- Escape wildcard in method names in kerneldoc (Bagas)
- Selftest improvements (Chris, Jonathan, Tvrtko, Anshuman, Tejas)
- Fix sparse warnings (Jani)

[airlied: fix unused variable in intel_workarounds]

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZBMSb42yjjzczRhj@jlahtine-mobl.ger.corp.intel.com
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 137
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 165
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_print.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 132
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_tlb.c | 388
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c | 72
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 78
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_print.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 140
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 44
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 56
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 116
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 13
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 134
49 files changed, 1240 insertions, 665 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 90a967374b1a..d8e06e783e30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -909,7 +909,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
- dsm_size = lmem_size - dsm_base;
+ dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
io_size = dsm_size;
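
The one-line fix above clips the data stolen memory (DSM) size down to whole 1 MiB units, per the "Make sure DSM size has correct 1MiB granularity on Gen12+" item in the merge summary. A minimal standalone sketch of that rounding, with hypothetical base/size values (the macro here is the power-of-two equivalent of the kernel's ALIGN_DOWN()):

#include <stdint.h>
#include <stdio.h>

#define SZ_1M (1ull << 20)
/* Power-of-two round-down, equivalent to the kernel's ALIGN_DOWN() here. */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	uint64_t lmem_size = 0x180000000ull; /* hypothetical 6 GiB of local memory */
	uint64_t dsm_base  = 0x17fe81000ull; /* hypothetical DSMBASE, not 1 MiB aligned */

	printf("raw:     %#llx\n", (unsigned long long)(lmem_size - dsm_base));
	printf("aligned: %#llx\n",
	       (unsigned long long)ALIGN_DOWN(lmem_size - dsm_base, SZ_1M));
	return 0;
}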
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3bb1f7f0110e..ff81af4c8202 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -108,31 +108,30 @@ struct tiled_blits {
u32 height;
};
-static bool supports_x_tiling(const struct drm_i915_private *i915)
+static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
int gen = GRAPHICS_VER(i915);
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ drm_WARN_ON(&i915->drm, gen < 9);
+
if (gen < 12)
return true;
- if (!HAS_LMEM(i915) || IS_DG1(i915))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return false;
- return true;
+ return HAS_DISPLAY(i915);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
{
- int gen = GRAPHICS_VER(buf->vma->vm->i915);
-
- if (gen < 9)
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
return false;
- if (gen < 12)
- return true;
-
/* filter out platforms with unsupported X-tile support in fastblit */
- if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
return false;
return true;
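
Restated as a standalone predicate, the gate above amounts to the following sketch (the struct and its fields are stand-ins for the driver's platform queries, not real i915 API):

#include <stdbool.h>

struct dev { int ver; int ver_full_minor; bool has_display; };

static bool fastblit_supports_x_tiling(const struct dev *d)
{
	if (d->ver < 12)
		return true;            /* gen9-11: X-tiling always usable */
	if (d->ver_full_minor < 50)
		return false;           /* 12.00/12.10: no X-tiled fast blits */
	return d->has_display;          /* 12.50+: only where display exists */
}

static bool fast_blit_ok(const struct dev *d, bool x_tiled)
{
	if (d->ver < 9)
		return false;           /* XY_FAST_COPY_BLT absent pre-gen9 */
	return !x_tiled || fastblit_supports_x_tiling(d);
}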
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d4e29da74612..ad3413242100 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -1143,12 +1144,130 @@ err_put:
return ret;
}
+static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
+{
+ static const union intel_engine_tlb_inv_reg gen8_regs[] = {
+ [RENDER_CLASS].reg = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
+ [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
+ };
+ static const union intel_engine_tlb_inv_reg gen12_regs[] = {
+ [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xehp_regs[] = {
+ [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = engine->i915;
+ const unsigned int instance = engine->instance;
+ const unsigned int class = engine->class;
+ const union intel_engine_tlb_inv_reg *regs;
+ union intel_engine_tlb_inv_reg reg;
+ unsigned int num = 0;
+ u32 val;
+
+ /*
+ * New platforms should not be added with catch-all-newer (>=)
+ * condition so that any later platform added triggers the below warning
+ * and in turn mandates a human cross-check of whether the invalidation
+ * flows have compatible semantics.
+ *
+ * For instance with the 11.00 -> 12.00 transition three out of five
+ * respective engine registers were moved to masked type. Then after the
+ * 12.00 -> 12.50 transition multicast handling is required too.
+ */
+
+ if (engine->gt->type == GT_MEDIA) {
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ regs = xelpmp_regs;
+ num = ARRAY_SIZE(xelpmp_regs);
+ }
+ } else {
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
+ regs = xehp_regs;
+ num = ARRAY_SIZE(xehp_regs);
+ } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return 0;
+ }
+ }
+
+ if (gt_WARN_ONCE(engine->gt, !num,
+ "Platform does not implement TLB invalidation!"))
+ return -ENODEV;
+
+ if (gt_WARN_ON_ONCE(engine->gt,
+ class >= num ||
+ (!regs[class].reg.reg &&
+ !regs[class].mcr_reg.reg)))
+ return -ERANGE;
+
+ reg = regs[class];
+
+ if (regs == xelpmp_regs && class == OTHER_CLASS) {
+ /*
+ * There's only a single GSC instance, but it uses register bit
+ * 1 instead of either 0 or OTHER_GSC_INSTANCE.
+ */
+ GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
+ val = 1;
+ } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
+ reg.reg = GEN8_M2TCR;
+ val = 0;
+ } else {
+ val = instance;
+ }
+
+ val = BIT(val);
+
+ engine->tlb_inv.mcr = regs == xehp_regs;
+ engine->tlb_inv.reg = reg;
+ engine->tlb_inv.done = val;
+
+ if (GRAPHICS_VER(i915) >= 12 &&
+ (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS ||
+ engine->class == OTHER_CLASS))
+ engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ else
+ engine->tlb_inv.request = val;
+
+ return 0;
+}
+
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
init_llist_head(&engine->barrier_tasks);
+ err = intel_engine_init_tlb_invalidation(engine);
+ if (err)
+ return err;
+
err = init_status_page(engine);
if (err)
return err;
@@ -1939,13 +2058,13 @@ static const char *repr_timer(const struct timer_list *t)
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
- if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(i915)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
@@ -1966,7 +2085,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
- if (GRAPHICS_VER(dev_priv) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1983,15 +2102,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
- else if (GRAPHICS_VER(dev_priv) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -2001,7 +2120,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
+ if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2067,7 +2186,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
rcu_read_unlock();
i915_sched_engine_active_unlock_bh(engine->sched_engine);
- } else if (GRAPHICS_VER(dev_priv) > 6) {
+ } else if (GRAPHICS_VER(i915) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
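
The request value computed in intel_engine_init_tlb_invalidation() above depends on whether the target register is "masked": such registers only latch bits whose corresponding bit in the upper 16-bit write mask is also set. A standalone sketch of that encoding (the macro mirrors i915's _MASKED_BIT_ENABLE()):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                (1u << (n))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) /* mirrors the i915 macro */

int main(void)
{
	uint32_t val = BIT(1); /* e.g. instance 1's invalidation bit */

	/* Plain registers take the bit directly; masked registers need the
	 * matching bit set in the upper 16-bit write mask as well. */
	printf("plain:  %#010x\n", val);
	printf("masked: %#010x\n", _MASKED_BIT_ENABLE(val));
	return 0;
}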
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 4fd54fb8810f..0a071e5da1a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -341,6 +341,18 @@ struct intel_engine_guc_stats {
u64 start_gt_clk;
};
+union intel_engine_tlb_inv_reg {
+ i915_reg_t reg;
+ i915_mcr_reg_t mcr_reg;
+};
+
+struct intel_engine_tlb_inv {
+ bool mcr;
+ union intel_engine_tlb_inv_reg reg;
+ u32 request;
+ u32 done;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -372,6 +384,8 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ struct intel_engine_tlb_inv tlb_inv;
+
/*
* Some w/a require forcewake to be held (which prevents RC6) while
* a particular engine is active. If so, we set fw_domain to which
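
The union introduced above lets one engine field carry either a regular or a multicast (MCR) register, with the separate bool in struct intel_engine_tlb_inv telling readers which member is live. A minimal standalone sketch of the same tagged-union pattern, using hypothetical wrapper types in place of i915_reg_t/i915_mcr_reg_t:

#include <stdbool.h>
#include <stdint.h>

/* Distinct wrapper types so plain and multicast registers cannot be
 * mixed up by accident, even though the payload is the same offset. */
typedef struct { uint32_t reg; } reg_t;
typedef struct { uint32_t reg; } mcr_reg_t;

union tlb_inv_reg {
	reg_t reg;
	mcr_reg_t mcr_reg;
};

struct tlb_inv {
	bool mcr;              /* discriminant: which union member is valid */
	union tlb_inv_reg reg;
};

static uint32_t tlb_inv_offset(const struct tlb_inv *t)
{
	return t->mcr ? t->reg.mcr_reg.reg : t->reg.reg.reg;
}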
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 2af1ae3831df..e10507fa71ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -394,6 +394,7 @@
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+#define MI_DO_COMPARE REG_BIT(21)
#define STATE_BASE_ADDRESS \
((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
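
MI_DO_COMPARE above is a flag OR'ed into the MI_CONDITIONAL_BATCH_BUFFER_END opcode dword; the new TLB selftest later emits exactly `MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2`. A quick sketch of how such a command dword composes, with the opcode shift mirroring i915's MI_INSTR():

#include <stdint.h>
#include <stdio.h>

#define REG_BIT(n)              (1u << (n))
#define MI_INSTR(opcode, flags) (((uint32_t)(opcode) << 23) | (flags)) /* mirrors i915 */

#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
#define MI_DO_COMPARE                   REG_BIT(21)

int main(void)
{
	/* The trailing "| 2" is the command length field (extra dwords). */
	uint32_t cmd = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;

	printf("COND_BBE dword: %#010x\n", cmd); /* 0x1b200002 */
	return 0;
}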
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index fcac1775e9c3..7ab3ca0f9f26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -33,7 +33,7 @@ struct intel_gsc {
} intf[INTEL_GSC_NUM_INTERFACES];
};
-void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915);
void intel_gsc_fini(struct intel_gsc *gsc);
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f4f694f12907..256cecf99dd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -784,6 +784,29 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_gsc_fini(&gt->gsc);
/*
+ * If we unload the driver and wedge before the GSC worker is complete,
+ * the worker will hit an error on its submission to the GSC engine and
+ * then exit. This is hard to hit for a user, but it is reproducible
+ * with skipping selftests. The error is handled gracefully by the
+ * worker, so there are no functional issues, but we still end up with
+ * an error message in dmesg, which is something we want to avoid as
+ * this is a supported scenario. We could modify the worker to better
+ * handle a wedging occurring during its execution, but that gets
+ * complicated for a couple of reasons:
+ * - We do want the error on runtime wedging, because there are
+ * implications for subsystems outside of GT (i.e., PXP, HDCP), it's
+ * only the error on driver unload that we want to silence.
+ * - The worker is responsible for multiple submissions (GSC FW load,
+ * HuC auth, SW proxy), so all of those will have to be adapted to
+ * handle the wedged_on_fini scenario.
+ * Therefore, it's much simpler to just wait for the worker to be done
+ * before wedging on driver removal, also considering that the worker
+ * will likely already be idle in the great majority of non-selftest
+ * scenarios.
+ */
+ intel_gsc_uc_flush_work(&gt->uc.gsc);
+
+ /*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
@@ -981,35 +1004,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
-struct reg_and_bit {
- union {
- i915_reg_t reg;
- i915_mcr_reg_t mcr_reg;
- };
- u32 bit;
-};
-
-static struct reg_and_bit
-get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
- const i915_reg_t *regs, const unsigned int num)
-{
- const unsigned int class = engine->class;
- struct reg_and_bit rb = { };
-
- if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg))
- return rb;
-
- rb.reg = regs[class];
- if (gen8 && class == VIDEO_DECODE_CLASS)
- rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
- else
- rb.bit = engine->instance;
-
- rb.bit = BIT(rb.bit);
-
- return rb;
-}
-
/*
* HW architecture suggests a typical invalidation time of 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -1023,14 +1017,20 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
-static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
+static int wait_for_invalidate(struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
+ if (engine->tlb_inv.mcr)
+ return intel_gt_mcr_wait_for_reg(engine->gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
- return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
+ return __intel_wait_for_register_fw(engine->gt->uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
@@ -1038,62 +1038,14 @@ static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
static void mmio_invalidate_full(struct intel_gt *gt)
{
- static const i915_reg_t gen8_regs[] = {
- [RENDER_CLASS] = GEN8_RTCR,
- [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
- [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
- [COPY_ENGINE_CLASS] = GEN8_BTCR,
- };
- static const i915_reg_t gen12_regs[] = {
- [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
- };
- static const i915_mcr_reg_t xehp_regs[] = {
- [RENDER_CLASS] = XEHP_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = XEHP_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = XEHP_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = XEHP_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = XEHP_COMPCTX_TLB_INV_CR,
- };
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
- const i915_reg_t *regs;
- unsigned int num = 0;
unsigned long flags;
- /*
- * New platforms should not be added with catch-all-newer (>=)
- * condition so that any later platform added triggers the below warning
- * and in turn mandates a human cross-check of whether the invalidation
- * flows have compatible semantics.
- *
- * For instance with the 11.00 -> 12.00 transition three out of five
- * respective engine registers were moved to masked type. Then after the
- * 12.00 -> 12.50 transition multicast handling is required too.
- */
-
- if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
- regs = NULL;
- num = ARRAY_SIZE(xehp_regs);
- } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
- regs = gen12_regs;
- num = ARRAY_SIZE(gen12_regs);
- } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
- regs = gen8_regs;
- num = ARRAY_SIZE(gen8_regs);
- } else if (GRAPHICS_VER(i915) < 8) {
- return;
- }
-
- if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!"))
+ if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -1103,33 +1055,18 @@ static void mmio_invalidate_full(struct intel_gt *gt)
awake = 0;
for_each_engine(engine, gt, id) {
- struct reg_and_bit rb;
-
if (!intel_engine_pm_is_awake(engine))
continue;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- u32 val = BIT(engine->instance);
-
- if (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS)
- val = _MASKED_BIT_ENABLE(val);
+ if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
- xehp_regs[engine->class],
- val);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
- if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS))
- rb.bit = _MASKED_BIT_ENABLE(rb.bit);
-
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
- }
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.request);
+ else
+ intel_uncore_write_fw(uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.request);
+
awake |= engine->mask;
}
@@ -1148,17 +1085,9 @@ static void mmio_invalidate_full(struct intel_gt *gt)
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
- struct reg_and_bit rb;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- rb.mcr_reg = xehp_regs[engine->class];
- rb.bit = BIT(engine->instance);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- }
-
- if (wait_for_invalidate(gt, rb))
- gt_err_ratelimited(gt, "%s TLB invalidation did not complete in %ums!\n",
+ if (wait_for_invalidate(engine))
+ gt_err_ratelimited(gt,
+ "%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
@@ -1204,3 +1133,7 @@ unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
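
With the per-engine setup moved into intel_engine_init_tlb_invalidation(), mmio_invalidate_full() above reduces to a uniform write-then-wait pattern: post the precomputed request mask, then poll until the hardware clears the done bits. A generic standalone sketch of that pattern, with a fake register standing in for the MMIO accessors:

#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_reg; /* stands in for a TLB invalidation register */

static void mmio_write(uint32_t val) { fake_reg = val; }

/* Fake HW that "completes" the invalidation after one readback. */
static uint32_t mmio_read(void)
{
	uint32_t v = fake_reg;

	fake_reg = 0;
	return v;
}

/* Post the request mask, then poll until the done bits read back as 0.
 * The driver bounds the poll with TLB_INVAL_TIMEOUT_US/_MS rather than
 * a fixed iteration count. */
static bool invalidate_and_wait(uint32_t request, uint32_t done)
{
	mmio_write(request);

	for (int i = 0; i < 1000; i++) {
		if ((mmio_read() & done) == 0)
			return true;
	}
	return false; /* timed out */
}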
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 3bb1c701d5ff..0b414eae1683 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -35,7 +35,7 @@
* ignored.
*/
-#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+#define HAS_MSLICE_STEERING(i915) (INTEL_INFO(i915)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
@@ -364,6 +364,7 @@ static u32 rw_with_mcr_steering(struct intel_gt *gt,
* function call.
*/
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+ __acquires(&gt->mcr_lock)
{
unsigned long __flags;
int err = 0;
@@ -410,6 +411,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* Context: Releases gt->mcr_lock
*/
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+ __releases(&gt->mcr_lock)
{
spin_unlock_irqrestore(&gt->mcr_lock, flags);
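
The __acquires()/__releases() markers added above are sparse context annotations: they expand to real attributes only when the code is checked with sparse (__CHECKER__ defined) and to nothing for a regular compiler, letting the checker verify lock/unlock pairing across function boundaries. A minimal sketch of the mechanism:

/* Same trick as linux/compiler_types.h: real attributes under sparse,
 * no-ops otherwise. */
#ifdef __CHECKER__
#define __acquires(x) __attribute__((context(x, 0, 1)))
#define __releases(x) __attribute__((context(x, 1, 0)))
#else
#define __acquires(x)
#define __releases(x)
#endif

struct lock { int taken; };

static void my_lock(struct lock *l) __acquires(l)
{
	l->taken = 1; /* sparse now knows the caller holds 'l' */
}

static void my_unlock(struct lock *l) __releases(l)
{
	l->taken = 0;
}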
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 85ae7dc079f2..e02cb90723ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -20,31 +20,10 @@
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
-#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
-static void mtl_media_busy(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
@@ -92,9 +71,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
- /* Wa_14017073508: mtl */
- mtl_media_busy(gt);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -144,9 +120,6 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- /* Wa_14017073508: mtl */
- mtl_media_idle(gt);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
index 5d9da355ce24..55a336a9ff06 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_print.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -28,6 +28,9 @@
#define gt_err_ratelimited(_gt, _fmt, ...) \
drm_err_ratelimited(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+#define gt_notice_ratelimited(_gt, _fmt, ...) \
+ dev_notice_ratelimited((_gt)->i915->drm.dev, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
#define gt_probe_error(_gt, _fmt, ...) \
do { \
if (i915_error_injected()) \
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index df07e1e799e3..4aecb5a7b631 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -478,6 +478,9 @@
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define COMMON_SLICE_CHICKEN4 _MMIO(0x7300)
+#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
@@ -767,9 +770,6 @@
#define GEN10_DFR_RATIO_EN_AND_CHICKEN MCR_REG(0x9550)
#define DFR_DISABLE (1 << 9)
-#define INF_UNIT_LEVEL_CLKGATE MCR_REG(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
#define MICRO_BP0_0 _MMIO(0x9800)
#define MICRO_BP0_2 _MMIO(0x9804)
#define MICRO_BP0_1 _MMIO(0x9808)
@@ -1091,6 +1091,7 @@
#define XEHP_BLT_TLB_INV_CR MCR_REG(0xcee4)
#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
+#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 6629e4c72b6b..33cba406b569 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -72,7 +72,7 @@ static void kobj_gt_release(struct kobject *kobj)
{
}
-static struct kobj_type kobj_gt_type = {
+static const struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 5c91622dfca4..f4150f61f39c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
}
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA) {
+ drm_notice(&i915->drm,
+ "Media RC6 disabled on A step\n");
+ return false;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 9312b29f5a97..80351f0a856c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -51,7 +51,7 @@ struct intel_reset {
/**
* Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
+ * that wait for i915->mm.wedged to settle.
*/
wait_queue_head_t queue;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 827adb0cfaea..3fd795c3263f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1052,9 +1052,9 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
static void ring_release(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
+ drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 9173ec75f2b8..6507fa3f6d1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
+ * i915->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index aa87d3832d60..d7e8c374f153 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -27,7 +27,7 @@ struct drm_printer;
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
* I915_MAX_SS_FUSE_BITS value below).
*/
-#define GEN_MAX_SS_PER_HSW_SLICE 6
+#define GEN_MAX_SS_PER_HSW_SLICE 8
/*
* Maximum number of 32-bit registers used by hardware to express the
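
Bumping GEN_MAX_SS_PER_HSW_SLICE from 6 to 8 matters because per-slice subslice arrays are sized by this constant while Gen11 can report up to 8 subslices; indexing entry 6 or 7 of a 6-element array is out of bounds. A contrived standalone sketch of the failure mode the merge summary calls out ("SSEU max_subslices array-index-out-of-bounds on Gen11"); the struct is illustrative, not the real SSEU layout:

#include <stdio.h>

#define GEN_MAX_SS_PER_HSW_SLICE 8 /* was 6: gen11 reports subslices 0..7 */

struct sseu {
	unsigned char subslice_mask[GEN_MAX_SS_PER_HSW_SLICE];
};

int main(void)
{
	struct sseu s = { { 0 } };

	/* With the old bound of 6, ss == 6 or 7 wrote past the array. */
	for (int ss = 0; ss < 8; ss++)
		s.subslice_mask[ss] = 1;

	printf("ok: all %d subslice slots in bounds\n", GEN_MAX_SS_PER_HSW_SLICE);
	return 0;
}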
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 8859eb118510..e7ee24bcad89 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -743,9 +743,13 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_224,
0, false);
- if (!IS_DG1(i915))
+ if (!IS_DG1(i915)) {
/* Wa_1806527549 */
wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
+
+ /* Wa_1606376872 */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
+ }
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1472,21 +1476,15 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
-
gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:dg1 */
- if (IS_DG1(i915))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
+ wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
/* Wa_1408615072:dg1 */
/* Empirical testing shows this register is unaffected by engine reset. */
- if (IS_DG1(i915))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1499,6 +1497,12 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1409757795:xehpsdv */
wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
/* Wa_16011155590:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
@@ -1548,6 +1552,9 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014368820:xehpsdv */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010670810:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1650,13 +1657,6 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014830051:dg2 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
-
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
@@ -1669,6 +1669,9 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010648519:dg2 */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1684,6 +1687,9 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16016694945 */
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
@@ -1724,11 +1730,38 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
debug_dump_steering(gt);
}
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them through the
+ * workaround infrastructure to make sure they're (re)applied at the proper
+ * times.
+ *
+ * The programming in this function is for settings that persist through
+ * engine resets and also are not part of any engine's register state context.
+ * I.e., settings that only need to be re-applied in the event of a full GT
+ * reset.
+ */
+static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ wa_mcr_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+ }
+
+ if (IS_DG2(gt->i915)) {
+ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+}
+
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ gt_tuning_settings(gt, wal);
+
if (gt->type == GT_MEDIA) {
if (MEDIA_VER(i915) >= 13)
xelpmp_gt_workarounds_init(gt, wal);
@@ -2154,23 +2187,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
+
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
+
break;
default:
break;
}
}
-static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
-{
- allow_read_ctx_timestamp(engine);
-}
-
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
- allow_read_ctx_timestamp(engine);
-
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -2187,6 +2217,9 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
break;
case COMPUTE_CLASS:
/* Wa_16011157294:dg2_g10 */
@@ -2218,12 +2251,25 @@ static void blacklist_trtt(struct intel_engine_cs *engine)
static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
- allow_read_ctx_timestamp(engine);
-
/* Wa_16014440446:pvc */
blacklist_trtt(engine);
}
+static void mtl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2232,13 +2278,13 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, engine->gt, "whitelist", engine->name);
if (IS_METEORLAKE(i915))
- ; /* noop; none at this time */
+ mtl_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
- xehpsdv_whitelist_build(engine);
+ ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2403,16 +2449,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
/* Wa_22010430635:dg2 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010648519:dg2 */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2897,16 +2939,8 @@ static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
- if (IS_PONTEVECCHIO(i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
- if (IS_DG2(i915)) {
- wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ if (IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
- }
/*
* This tuning setting proves beneficial only on ATS-M designs; the
@@ -2988,11 +3022,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0, false);
}
- if (IS_PONTEVECCHIO(i915)) {
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
@@ -3004,18 +3033,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-
/* Wa_14010449647:xehpsdv */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
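
The gt_tuning_settings() move above relies on the workaround list being replayable data: recording the settings as list entries (rather than one-shot writes) means they are re-applied automatically after a full GT reset. A hypothetical, self-contained sketch of that read-modify-write replay pattern (the struct and helpers are illustrative, not the real i915_wa_list API):

#include <stddef.h>
#include <stdint.h>

static uint32_t regs[0x10000 / 4]; /* fake register file for the sketch */

static uint32_t mmio_read(uint32_t reg) { return regs[reg / 4]; }
static void mmio_write(uint32_t reg, uint32_t val) { regs[reg / 4] = val; }

struct wa {
	uint32_t reg; /* MMIO offset */
	uint32_t clr; /* bits to clear */
	uint32_t set; /* bits to set */
};

/* Replay every entry: read, clear, set, write back. Because the entries
 * are data rather than code paths, the same list can be re-applied after
 * a reset without re-running the selection logic. */
static void wa_list_apply(const struct wa *list, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint32_t val = mmio_read(list[i].reg) & ~list[i].clr;

		mmio_write(list[i].reg, val | list[i].set);
	}
}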
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b46425aeb2f0..0971241707ce 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -63,8 +63,8 @@ static void measure_clocks(struct intel_engine_cs *engine,
udelay(1000);
- dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += read_timestamp(engine);
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
local_irq_enable();
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6755bbc4ebda..84e77e8dbba1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -299,13 +299,13 @@ int live_rps_clock_interval(void *arg)
for (i = 0; i < 5; i++) {
preempt_disable();
- dt_[i] = ktime_get();
cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_get();
udelay(1000);
- dt_[i] = ktime_sub(ktime_get(), dt_[i]);
cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
preempt_enable();
}
@@ -537,8 +537,8 @@ static u64 __measure_frequency(u32 *cntr, int duration_ms)
{
u64 dc, dt;
- dt = ktime_get();
dc = READ_ONCE(*cntr);
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = READ_ONCE(*cntr) - dc;
dt = ktime_get() - dt;
@@ -566,8 +566,8 @@ static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
{
u64 dc, dt;
- dt = ktime_get();
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
dt = ktime_get() - dt;
@@ -1094,8 +1094,8 @@ static u64 __measure_power(int duration_ms)
{
u64 dE, dt;
- dt = ktime_get();
dE = librapl_energy_uJ();
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dE = librapl_energy_uJ() - dE;
dt = ktime_get() - dt;
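
The reorderings in this and the previous selftest all apply the same discipline: sample the counter and the clock in the same order at both ends of the window, so the unmeasured gap between the two reads appears identically in the start and end samples and cancels out of the delta. A standalone sketch of the bracketing pattern:

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Counter first, clock second -- at BOTH ends -- so the skew between the
 * two reads cancels in the ratio. 'read_counter' is any monotonically
 * increasing counter supplied by the caller. */
static double measure_rate(uint64_t (*read_counter)(void), unsigned int sleep_us)
{
	uint64_t dc = read_counter();
	uint64_t dt = now_ns();
	struct timespec req = { 0, (long)sleep_us * 1000 };

	nanosleep(&req, NULL);

	dc = read_counter() - dc;
	dt = now_ns() - dt;
	return (double)dc / ((double)dt / 1e9); /* counts per second */
}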
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
new file mode 100644
index 000000000000..e6cac1f15d6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
+
+#include "gen8_engine_cs.h"
+#include "i915_gem_ww.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
+{
+ GEM_BUG_ON(addr < i915_vma_offset(vma));
+ GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
+ memset64(page_mask_bits(vma->obj->mm.mapping) +
+ (addr - i915_vma_offset(vma)), val, 1);
+}
+
+static int
+pte_tlbinv(struct intel_context *ce,
+ struct i915_vma *va,
+ struct i915_vma *vb,
+ u64 align,
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
+ u64 length,
+ struct rnd_state *prng)
+{
+ struct drm_i915_gem_object *batch;
+ struct drm_mm_node vb_node;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u64 addr;
+ int err;
+ u32 *cs;
+
+ batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ vma = i915_vma_instance(batch, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out;
+
+ /* Pin va at random but aligned offset after vma */
+ addr = round_up(vma->node.start + vma->node.size, align);
+ /* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
+ addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
+ va->size, align);
+ err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
+ if (err) {
+ pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
+ goto out;
+ }
+ GEM_BUG_ON(i915_vma_offset(va) != addr);
+ if (vb != va) {
+ vb_node = vb->node;
+ vb->node = va->node; /* overwrites the _same_ PTE */
+ }
+
+ /*
+ * Now choose random dword at the 1st pinned page.
+ *
+ * SZ_64K pages on dg1 require that the whole PT be marked
+ * containing 64KiB entries. So we make sure that vma
+ * covers the whole PT, despite being randomly aligned to 64KiB
+ * and restrict our sampling to the 2MiB PT within where
+ * we know that we will be using 64KiB pages.
+ */
+ if (align == SZ_64K)
+ addr = round_up(addr, SZ_2M);
+ addr = igt_random_offset(prng, addr, addr + align, 8, 8);
+
+ if (va != vb)
+ pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg,
+ addr & -length, length);
+
+ cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
+ *cs++ = MI_NOOP; /* for later termination */
+ /*
+ * Sample the target to see if we spot the updated backing store.
+ * Gen8 VCS compares immediate value with bitwise-and of two
+ * consecutive DWORDS pointed by addr, other gen/engines compare value
+ * with DWORD pointed by addr. Moreover we want to exercise DWORD size
+	 * invalidations. To fulfill all these requirements, the values
+	 * below have been chosen.
+ */
+ *cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
+ *cs++ = 0; /* break if *addr == 0 */
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+ vma_set_qw(va, addr, -1);
+ vma_set_qw(vb, addr, 0);
+
+ /* Keep sampling until we get bored */
+ *cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
+
+ i915_gem_object_flush_map(batch);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_va;
+ }
+
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
+ if (err) {
+ i915_request_add(rq);
+ goto out_va;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /* Short sleep to sanitycheck the batch is spinning before we begin */
+ msleep(10);
+ if (va == vb) {
+ if (!i915_request_completed(rq)) {
+ pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg);
+ err = -EIO;
+ }
+ } else if (!i915_request_completed(rq)) {
+ struct i915_vma_resource vb_res = {
+ .bi.pages = vb->obj->mm.pages,
+ .bi.page_sizes = vb->obj->mm.page_sizes,
+ .start = i915_vma_offset(vb),
+ .vma_size = i915_vma_size(vb)
+ };
+ unsigned int pte_flags = 0;
+
+ /* Flip the PTE between A and B */
+ if (i915_gem_object_is_lmem(vb->obj))
+ pte_flags |= PTE_LM;
+ ce->vm->insert_entries(ce->vm, &vb_res, 0, pte_flags);
+
+ /* Flush the PTE update to concurrent HW */
+ tlbinv(ce->vm, addr & -length, length);
+
+ if (wait_for(i915_request_completed(rq), HZ / 2)) {
+ pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
+ ce->engine->name);
+ err = -EINVAL;
+ }
+ } else {
+ pr_err("Spinner ended unexpectedly\n");
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+ cs = page_mask_bits(batch->mm.mapping);
+ *cs = MI_BATCH_BUFFER_END;
+ wmb();
+
+out_va:
+ if (vb != va)
+ vb->node = vb_node;
+ i915_vma_unpin(va);
+ if (i915_vma_unbind_unlocked(va))
+ err = -EIO;
+out:
+ i915_gem_object_put(batch);
+ return err;
+}
+
+static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
+{
+ /*
+	 * Allocating the largest possible page size allows us to test all
+	 * types of pages.
+ */
+ return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
+{
+ /*
+ * SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
+ * While that does not require the whole 2M block to be contiguous
+	 * it is easier to make it so, since we need that for SZ_2M pages.
+	 * Since we randomly offset the start of the vma, we need a 4M object
+	 * so that there is a 2M range within it that is suitable for SZ_64K PTEs.
+ */
+ return i915_gem_object_create_internal(gt->i915, SZ_4M);
+}
+
+static int
+mem_tlbinv(struct intel_gt *gt,
+ struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
+{
+ unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *A, *B;
+ struct i915_ppgtt *ppgtt;
+ struct i915_vma *va, *vb;
+ enum intel_engine_id id;
+ I915_RND_STATE(prng);
+ void *vaddr;
+ int err;
+
+ /*
+ * Check that the TLB invalidate is able to revoke an active
+ * page. We load a page into a spinning COND_BBE loop and then
+ * remap that page to a new physical address. The old address, and
+ * so the loop keeps spinning, is retained in the TLB cache until
+ * we issue an invalidate.
+ */
+
+ A = create_fn(gt);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_a;
+ }
+
+ B = create_fn(gt);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto out_a;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_b;
+ }
+
+ GEM_BUG_ON(A->base.size != B->base.size);
+ if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
+ pr_warn("Failed to allocate contiguous pages for size %zx\n",
+ A->base.size);
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_b;
+ }
+
+ va = i915_vma_instance(A, &ppgtt->vm, NULL);
+ if (IS_ERR(va)) {
+ err = PTR_ERR(va);
+ goto out_vm;
+ }
+
+ vb = i915_vma_instance(B, &ppgtt->vm, NULL);
+ if (IS_ERR(vb)) {
+ err = PTR_ERR(vb);
+ goto out_vm;
+ }
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct i915_gem_ww_ctx ww;
+ struct intel_context *ce;
+ int bit;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+
+ for_i915_gem_ww(&ww, err, true)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_put;
+
+ for_each_set_bit(bit,
+ (unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
+ BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
+ unsigned int len;
+
+ if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
+ continue;
+
+ /* sanitycheck the semaphore wake up */
+ err = pte_tlbinv(ce, va, va,
+ BIT_ULL(bit),
+ NULL, SZ_4K,
+ &prng);
+ if (err)
+ goto err_unpin;
+
+ for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
+ err = pte_tlbinv(ce, va, vb,
+ BIT_ULL(bit),
+ tlbinv,
+ BIT_ULL(len),
+ &prng);
+ if (err)
+ goto err_unpin;
+ if (len == ppgtt_size)
+ break;
+ }
+ }
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+out_vm:
+ i915_vm_put(&ppgtt->vm);
+out_b:
+ i915_gem_object_put(B);
+out_a:
+ i915_gem_object_put(A);
+ return err;
+}
+
+static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
+{
+ intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+}
+
+static int invalidate_full(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int err;
+
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0; /* TLB invalidate not implemented */
+
+ err = mem_tlbinv(gt, create_smem, tlbinv_full);
+ if (err == 0)
+ err = mem_tlbinv(gt, create_lmem, tlbinv_full);
+ if (err == -ENODEV || err == -ENXIO)
+ err = 0;
+
+ return err;
+}
+
+int intel_tlb_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(invalidate_full),
+ };
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ int err;
+
+ if (intel_gt_is_wedged(gt))
+ continue;
+
+ err = intel_gt_live_subtests(tests, gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
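
The selftest passes `addr & -length` as the invalidation start: for a power-of-two length, two's-complement negation makes -length an all-ones mask above the length bits, so the AND rounds the sampled address down to a length-aligned boundary. A quick standalone check of the identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345678ull;
	uint64_t length = 1ull << 16; /* 64 KiB; must be a power of two */

	/* -length == ~(length - 1) for powers of two, so this is ALIGN_DOWN. */
	printf("%#llx rounded to %#llx units: %#llx\n",
	       (unsigned long long)addr, (unsigned long long)length,
	       (unsigned long long)(addr & -length)); /* 0x12340000 */
	return 0;
}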
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index f2d9858d827c..021f51d9b456 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -24,37 +24,37 @@ static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+ return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}
-static struct kobj_attribute name_attr =
+static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);
static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}
-static struct kobj_attribute class_attr =
+static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);
static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}
-static struct kobj_attribute inst_attr =
+static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);
static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+ return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}
-static struct kobj_attribute mmio_attr =
+static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
static const char * const vcs_caps[] = {
@@ -107,11 +107,9 @@ __caps_show(struct intel_engine_cs *engine,
for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
if (n >= count || !repr[n]) {
if (GEM_WARN_ON(show_unknown))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "[%x] ", n);
+ len += sysfs_emit_at(buf, len, "[%x] ", n);
} else {
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s ", repr[n]);
+ len += sysfs_emit_at(buf, len, "%s ", repr[n]);
}
if (GEM_WARN_ON(len >= PAGE_SIZE))
break;
@@ -127,7 +125,7 @@ caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(engine, engine->uabi_capabilities, buf, true);
}
-static struct kobj_attribute caps_attr =
+static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);
static ssize_t
@@ -136,7 +134,7 @@ all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}
-static struct kobj_attribute all_caps_attr =
+static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
static ssize_t
@@ -182,10 +180,10 @@ max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_attr =
+static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
@@ -193,10 +191,10 @@ max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_def =
+static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
@@ -236,10 +234,10 @@ timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
@@ -247,10 +245,10 @@ timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
@@ -287,10 +285,10 @@ stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_attr =
+static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
@@ -298,10 +296,10 @@ stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_def =
+static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
@@ -343,10 +341,10 @@ preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
@@ -355,10 +353,10 @@ preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
@@ -399,10 +397,10 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_attr =
+static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
static ssize_t
@@ -410,10 +408,10 @@ heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_def =
+static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
@@ -421,7 +419,7 @@ static void kobj_engine_release(struct kobject *kobj)
kfree(kobj);
}
-static struct kobj_type kobj_engine_type = {
+static const struct kobj_type kobj_engine_type = {
.release = kobj_engine_release,
.sysfs_ops = &kobj_sysfs_ops
};
@@ -449,7 +447,7 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
static void add_defaults(struct kobj_engine *parent)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&max_spin_def.attr,
&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
@@ -485,7 +483,7 @@ static void add_defaults(struct kobj_engine *parent)
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&name_attr.attr,
&class_attr.attr,
&inst_attr.attr,
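The hunks above follow the kernel's recommended pattern for sysfs show() callbacks: sysfs_emit() and sysfs_emit_at() assume a PAGE_SIZE buffer and bounds-check internally, so they cannot overrun the page the way open-coded sprintf()/snprintf() arithmetic can. A minimal sketch of the two idioms outside the i915 context (the attribute names and values here are made up for illustration):

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* single value: one sysfs_emit() call, bounded to PAGE_SIZE internally */
static ssize_t level_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}

/* accumulated output: sysfs_emit_at() tracks the running offset */
static ssize_t names_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	static const char * const names[] = { "alpha", "beta", "gamma" };
	int len = 0, i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}

This mirrors __caps_show() above, which swaps its snprintf(buf + len, PAGE_SIZE - len, ...) bookkeeping for sysfs_emit_at(buf, len, ...).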
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e73d4440c5e8..1d9fdfb11268 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -6,6 +6,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_fw.h"
@@ -88,9 +89,8 @@ out_rq:
i915_request_put(rq);
if (err)
- drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
- "Request submission for GSC load failed (%d)\n",
- err);
+ gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -200,8 +200,7 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
- drm_info(&gt->i915->drm, "Loaded GSC firmware %s\n",
- gsc_fw->file_selected.path);
+ gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
return 0;
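A recurring change in this series is replacing "%d" error prints with the "%pe" printk format, fed through ERR_PTR(). "%pe" decodes an ERR_PTR-encoded pointer into the symbolic errno name, which makes logs considerably easier to read. A self-contained sketch of the idiom (pr_err() stands in for the gt_err()/guc_err() wrappers used in the patch):

#include <linux/err.h>
#include <linux/printk.h>

static void report_failure(int err)
{
	/* prints "request failed -ENODEV" instead of "request failed -19" */
	if (err)
		pr_err("request failed %pe\n", ERR_PTR(err));
}

When the value at hand is already an error pointer (an IS_ERR() result), it can be passed to "%pe" directly, as the selftest hunks at the end of this diff do with ce and spin_rq.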
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index fd21dbd2663b..2d5b70b3384c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
#include "i915_drv.h"
@@ -59,7 +60,6 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
struct i915_vma *vma;
@@ -81,8 +81,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
I915_GEM_HWS_GSC_ADDR,
&gsc_lock, "gsc_context");
if (IS_ERR(ce)) {
- drm_err(&gt->i915->drm,
- "failed to create GSC CS ctx for FW communication\n");
+ gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
err = PTR_ERR(ce);
goto out_vma;
}
@@ -98,7 +97,7 @@ out_vma:
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
- i915_probe_error(i915, "failed with %d\n", err);
+ gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
return err;
}
@@ -117,7 +116,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
intel_uc_fw_fini(&gsc->fw);
}
-void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
@@ -125,6 +124,25 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
flush_work(&gsc->work);
}
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ /*
+ * We only want to start the GSC worker from here in the actual resume
+ * flow and not during driver load, because GSC load is slow and we
+ * want the default state init to complete first rather than hold up
+ * the init thread. A separate call to intel_gsc_uc_load_start() makes
+ * sure the GSC is still loaded during driver load.
+ */
+ if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
+ return;
+
+ intel_gsc_uc_load_start(gsc);
+}
+
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
index 03fd0a8e8db1..5f50fa1ff8b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -26,6 +26,8 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
int intel_gsc_uc_init(struct intel_gsc_uc *gsc);
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc);
void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index fc3b994626a4..cf49188db6a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -15,6 +15,7 @@
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
@@ -353,7 +354,6 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
u32 ipver)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct sseu_dev_info *sseu;
int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
@@ -402,7 +402,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
}
}
- drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
guc->capture->extlists = extlists;
}
@@ -477,7 +477,6 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
u32 i = 0, j = 0;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
@@ -509,8 +508,7 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
}
}
if (i < num_entries)
- drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
- (int)i, (int)num_entries);
+ guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries);
return 0;
}
@@ -540,12 +538,11 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
if (!gc->reglists) {
- drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ guc_warn(guc, "No capture reglist for this device\n");
return -ENODEV;
}
@@ -557,9 +554,9 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
!guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ guc_warn(guc, "Missing capture reglist: global!\n");
else
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
__stringify_type(type), type,
__stringify_engclass(classid), classid);
return -ENODATA;
@@ -592,7 +589,6 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_debug_capture_list *listnode;
int ret, num_regs;
u8 *caplist, *tmp;
@@ -623,7 +619,7 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
caplist = kzalloc(size, GFP_KERNEL);
if (!caplist) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ guc_dbg(guc, "Failed to alloc cached register capture list");
return -ENOMEM;
}
@@ -653,7 +649,6 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
void **outptr, size_t *size)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tmp = sizeof(u32) * 4;
void *null_header;
@@ -665,7 +660,7 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
null_header = kzalloc(tmp, GFP_KERNEL);
if (!null_header) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ guc_dbg(guc, "Failed to alloc cached register capture null list");
return -ENOMEM;
}
@@ -727,7 +722,6 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
static void check_guc_capture_size(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int min_size = guc_capture_output_min_size_est(guc);
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
@@ -741,13 +735,13 @@ static void check_guc_capture_size(struct intel_guc *guc)
* INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
*/
if (min_size < 0)
- drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
- drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n",
+ guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
- drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
buffer_size, spare_size, min_size);
}
@@ -848,7 +842,6 @@ static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
u32 *dw)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tries = 2;
int avail = 0;
u32 *src_data;
@@ -865,7 +858,7 @@ guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *
return 4;
}
if (avail)
- drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
buf->rd = 0;
}
@@ -1118,13 +1111,12 @@ static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *node = NULL;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int i;
for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
node = guc_capture_alloc_one_node(guc);
if (!node) {
- drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ guc_warn(guc, "Register capture pre-alloc-cache failure\n");
/* don't free the priors, use what we got and clean up at shutdown */
return;
}
@@ -1169,7 +1161,6 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct __guc_capture_parsed_output *node = NULL;
@@ -1183,7 +1174,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
if (!i)
return -ENODATA;
if (i % sizeof(u32)) {
- drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ guc_warn(guc, "Got mis-aligned register capture entries\n");
ret = -EIO;
goto bailout;
}
@@ -1301,7 +1292,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
break;
}
if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ guc_dbg(guc, "Register capture missing global dump: %08x!\n",
datatype);
}
node->is_partial = is_partial;
@@ -1322,7 +1313,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
if (numregs > guc->capture->max_mmio_per_node) {
- drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
numregs = guc->capture->max_mmio_per_node;
}
node->reginfo[datatype].num_regs = numregs;
@@ -1367,7 +1358,6 @@ static void __guc_capture_process_output(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, full_count;
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_log_buffer_state log_buf_state_local;
struct guc_log_buffer_state *log_buf_state;
struct __guc_capture_bufstate buf;
@@ -1403,7 +1393,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -1571,6 +1562,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
@@ -1586,13 +1598,11 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_context *ce)
{
struct __guc_capture_parsed_output *n, *ntmp;
- struct drm_i915_private *i915;
struct intel_guc *guc;
if (!gt || !ee || !ce)
return;
- i915 = gt->i915;
guc = &gt->uc.guc;
if (!guc->capture)
return;
@@ -1606,16 +1616,18 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
- n->guc_id && n->guc_id == ce->guc_id.id &&
- (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
- (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
return;
}
}
- drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+
+ guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
+ ce->guc_id.id, ce->lrc.lrca);
}
void intel_guc_capture_process(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index c3792ddeec80..195db8c9d420 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -333,8 +333,7 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
- "GuC log buffer overflow\n");
+ guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
}
return overflow;
@@ -521,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -544,9 +543,9 @@ static int guc_log_relay_create(struct intel_guc_log *log)
n_subbufs = 8;
guc_log_relay_chan = relay_open("guc_log",
- dev_priv->drm.primary->debugfs_root,
+ i915->drm.primary->debugfs_root,
subbuf_size, n_subbufs,
- &relay_callbacks, dev_priv);
+ &relay_callbacks, i915);
if (!guc_log_relay_chan) {
guc_err(guc, "Couldn't create relay channel for logging\n");
@@ -571,7 +570,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -580,7 +579,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -662,7 +661,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -676,12 +675,12 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -694,7 +693,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
index e75989d4ba06..2465d05638b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
@@ -30,6 +30,9 @@
#define guc_err_ratelimited(_guc, _fmt, ...) \
guc_printk((_guc), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define guc_notice_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), notice_ratelimited, _fmt, ##__VA_ARGS__)
+
#define guc_probe_error(_guc, _fmt, ...) \
guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index b5855091cf6a..1adec6de223c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -6,25 +6,15 @@
#include <linux/string_helpers.h>
#include "intel_guc_rc.h"
+#include "intel_guc_print.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
static bool __guc_rc_supported(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
- /*
- * Wa_14017073508: mtl
- * Do not enable gucrc to avoid additional interrupts which
- * may disrupt pcode wa.
- */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- return false;
-
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(gt->i915) >= 12;
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
@@ -70,13 +60,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to %s RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
- drm_info(&gt->i915->drm, "GuC RC: %s\n",
- str_enabled_disabled(enable));
+ guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 63464933cbce..026d73855f36 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
@@ -171,14 +172,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- i915_probe_error(i915, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -188,15 +187,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int ret;
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -212,8 +210,8 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -236,9 +234,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_notice(&i915->drm,
- "Failed to send set_param for min freq(%d): (%d)\n",
- freq, ret);
+ guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
+ freq, ERR_PTR(ret));
}
return ret;
@@ -267,7 +264,6 @@ static void slpc_boost_work(struct work_struct *work)
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int err;
@@ -275,9 +271,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- i915_probe_error(i915,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
return err;
}
@@ -338,7 +332,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
static int slpc_reset(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
@@ -346,15 +339,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- i915_probe_error(i915, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ guc_probe_error(guc, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -495,8 +487,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val < slpc->rp1_freq);
if (ret) {
- i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
+ ERR_PTR(ret));
goto out;
}
@@ -611,15 +603,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int slpc_min_freq;
int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
if (ret) {
- drm_err(&i915->drm,
- "Failed to get min freq: (%d)\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
return false;
}
@@ -685,9 +674,8 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
if (ret)
- drm_err(&i915->drm,
- "Override gucrc mode %d failed %d\n",
- mode, ret);
+ guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
+ mode, ERR_PTR(ret));
}
return ret;
@@ -702,9 +690,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
if (ret)
- drm_err(&i915->drm,
- "Unsetting gucrc mode failed %d\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
}
return ret;
@@ -725,7 +711,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
*/
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(!slpc->vma);
@@ -734,8 +720,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -743,7 +728,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(to_gt(i915));
+ intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
slpc_get_rp_values(slpc);
@@ -753,16 +738,14 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 53f3ed3244d5..88e881b100cf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1352,6 +1352,16 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
return ns_to_ktime(total);
}
+static void guc_enable_busyness_worker(struct intel_guc *guc)
+{
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
+}
+
+static void guc_cancel_busyness_worker(struct intel_guc *guc)
+{
+ cancel_delayed_work_sync(&guc->timestamp.work);
+}
+
static void __reset_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1360,7 +1370,7 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1416,8 +1426,7 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
@@ -1432,20 +1441,26 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_init_engine_stats(struct intel_guc *guc)
+static int guc_init_engine_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ int ret;
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ ret = guc_action_enable_usage_stats(guc);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
- int ret = guc_action_enable_usage_stats(guc);
+ if (ret)
+ guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
+ else
+ guc_enable_busyness_worker(guc);
- if (ret)
- guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
- }
+ return ret;
+}
+
+static void guc_fini_engine_stats(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
}
void intel_guc_busyness_park(struct intel_gt *gt)
@@ -1460,7 +1475,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
* and causes an unclaimed register access warning. Cancel the worker
* synchronously here.
*/
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1487,8 +1502,7 @@ void intel_guc_busyness_unpark(struct intel_gt *gt)
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static inline bool
@@ -4102,9 +4116,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = guc_submit_request;
}
-static inline void guc_kernel_context_pin(struct intel_guc *guc,
- struct intel_context *ce)
+static inline int guc_kernel_context_pin(struct intel_guc *guc,
+ struct intel_context *ce)
{
+ int ret;
+
/*
* Note: we purposefully do not check the returns below because
* the registration can only fail if a reset is just starting.
@@ -4112,16 +4128,24 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc,
* isn't happening and even it did this code would be run again.
*/
- if (context_guc_id_invalid(ce))
- pin_guc_id(guc, ce);
+ if (context_guc_id_invalid(ce)) {
+ ret = pin_guc_id(guc, ce);
+
+ if (ret < 0)
+ return ret;
+ }
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
guc_context_init(ce);
- try_context_registration(ce, true);
+ ret = try_context_registration(ce, true);
+ if (ret)
+ unpin_guc_id(guc, ce);
+
+ return ret;
}
-static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+static inline int guc_init_submission(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -4148,9 +4172,17 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
- pinned_contexts_link)
- guc_kernel_context_pin(guc, ce);
+ pinned_contexts_link) {
+ int ret = guc_kernel_context_pin(guc, ce);
+
+ if (ret) {
+ /* No point in trying to clean up as i915 will wedge on failure */
+ return ret;
+ }
+ }
}
+
+ return 0;
}
static void guc_release(struct intel_engine_cs *engine)
@@ -4393,30 +4425,57 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
return ret;
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ u32 val;
- /* Enable and route to GuC */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
- GUC_SEM_INTR_ROUTE_TO_GUC |
- GUC_SEM_INTR_ENABLE_ALL);
+ if (GRAPHICS_VER(gt->i915) < 12)
+ return;
+
+ if (to_guc)
+ val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
+ else
+ val = 0;
- guc_init_lrc_mapping(guc);
- guc_init_engine_stats(guc);
- guc_init_global_schedule_policy(guc);
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
}
-void intel_guc_submission_disable(struct intel_guc *guc)
+int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ /* Semaphore interrupt enable and route to GuC */
+ guc_route_semaphores(guc, true);
- /* Note: By the time we're here, GuC may have already been reset */
+ ret = guc_init_submission(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_engine_stats(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ goto fail_stats;
+
+ return 0;
+
+fail_stats:
+ guc_fini_engine_stats(guc);
+fail_sem:
+ guc_route_semaphores(guc, false);
+ return ret;
+}
+
+/* Note: By the time we're here, GuC may have already been reset */
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
- /* Disable and route to host */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
+ /* Semaphore interrupt disable and route to host */
+ guc_route_semaphores(guc, false);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -4660,9 +4719,10 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Got GuC reset of 0x%04X, exiting = %d, banned = %d\n",
- ce->guc_id.id, test_bit(CONTEXT_EXITING, &ce->flags),
- test_bit(CONTEXT_BANNED, &ce->flags));
+ guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ ce->guc_id.id, ce->engine->name,
+ str_yes_no(intel_context_is_exiting(ce)),
+ str_yes_no(intel_context_is_banned(ce)));
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
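guc_enable_busyness_worker()/guc_cancel_busyness_worker() above wrap the delayed-work API so that every start and stop site of the busyness ping stays consistent. In isolation, the underlying pattern looks like this (the worker and delay names are illustrative; only mod_delayed_work(), cancel_delayed_work_sync() and system_highpri_wq are real kernel API):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void ping_fn(struct work_struct *wrk);
static DECLARE_DELAYED_WORK(ping_work, ping_fn);

static void ping_fn(struct work_struct *wrk)
{
	/* ... sample stats ..., then self-rearm like guc_timestamp_ping() */
	mod_delayed_work(system_highpri_wq, &ping_work, HZ);
}

static void ping_start(void)
{
	/* queues the work if idle, or just moves the timer if pending */
	mod_delayed_work(system_highpri_wq, &ping_work, HZ);
}

static void ping_stop(void)
{
	/* cancels and waits; safe even against a self-rearming callback */
	cancel_delayed_work_sync(&ping_work);
}

Funneling every call through one pair of helpers is what lets intel_guc_submission_disable() add a guc_cancel_busyness_worker() call without worrying about a racing requeue from another site.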
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5a95a9f0a8e3..c57b29cdb1a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -15,7 +15,7 @@ struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-void intel_guc_submission_enable(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 410905da8e97..72884e21470b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -13,6 +14,15 @@
#include <linux/device/bus.h>
#include <linux/mei_aux.h>
+#define huc_printk(_huc, _level, _fmt, ...) \
+ gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__)
+#define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__)
+#define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__)
+#define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__)
+#define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__)
+#define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__)
+#define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__)
+
/**
* DOC: HuC
*
@@ -107,11 +117,9 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti
if (!intel_huc_is_authenticated(huc)) {
if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI GSC init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI GSC\n");
else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI PXP init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI PXP\n");
else
MISSING_CASE(huc->delayed_load.status);
@@ -174,8 +182,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d
case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
- drm_info(&huc_to_gt(huc)->i915->drm,
- "mei driver not bound, disabling HuC load\n");
+ huc_info(huc, "MEI driver not bound, disabling load\n");
gsc_init_error(huc);
break;
}
@@ -193,8 +200,7 @@ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus
huc->delayed_load.nb.notifier_call = gsc_notifier;
ret = bus_register_notifier(bus, &huc->delayed_load.nb);
if (ret) {
- drm_err(&huc_to_gt(huc)->i915->drm,
- "failed to register GSC notifier\n");
+ huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
huc->delayed_load.nb.notifier_call = NULL;
gsc_init_error(huc);
}
@@ -306,29 +312,25 @@ static int check_huc_loading_mode(struct intel_huc *huc)
GSC_LOADS_HUC;
if (fw_needs_gsc != hw_uses_gsc) {
- drm_err(&gt->i915->drm,
- "mismatch between HuC FW (%s) and HW (%s) load modes\n",
- HUC_LOAD_MODE_STRING(fw_needs_gsc),
- HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc));
return -ENOEXEC;
}
/* make sure we can access the GSC via the mei driver if we need it */
if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
fw_needs_gsc) {
- drm_info(&gt->i915->drm,
- "Can't load HuC due to missing MEI modules\n");
+ huc_info(huc, "can't load due to missing MEI modules\n");
return -EIO;
}
- drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+ huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc));
return 0;
}
int intel_huc_init(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
err = check_huc_loading_mode(huc);
@@ -345,7 +347,7 @@ int intel_huc_init(struct intel_huc *huc)
out:
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- drm_info(&i915->drm, "HuC init failed with %d\n", err);
+ huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
return err;
}
@@ -389,13 +391,13 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
delayed_huc_load_complete(huc);
if (ret) {
- drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
+ huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret));
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
- drm_info(&gt->i915->drm, "HuC authenticated\n");
+ huc_info(huc, "authenticated!\n");
return 0;
}
@@ -430,7 +432,7 @@ int intel_huc_auth(struct intel_huc *huc)
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
- DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));
goto fail;
}
@@ -442,7 +444,7 @@ int intel_huc_auth(struct intel_huc *huc)
return 0;
fail:
- i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret));
return ret;
}
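The huc_printk() family added at the top of this file generates one wrapper per log level by token-pasting the level name into the underlying gt_##_level() call, the same scheme intel_guc_print.h uses for the guc_*() helpers. The preprocessor trick itself, reduced to plain, userspace-runnable C:

#include <stdio.h>

#define base_err(fmt, ...)  fprintf(stderr, "ERR: " fmt, ##__VA_ARGS__)
#define base_info(fmt, ...) fprintf(stderr, "INF: " fmt, ##__VA_ARGS__)

/* paste _level into the callee name, exactly like gt_##_level() */
#define huc_like_printk(_level, _fmt, ...) \
	base_##_level("HuC: " _fmt, ##__VA_ARGS__)

int main(void)
{
	huc_like_printk(err, "authentication failed %d\n", -5);
	huc_like_printk(info, "authenticated!\n");
	return 0;
}

Each huc_err()/huc_info() definition then just pins _level, so call sites read like ordinary printk variants while the "HuC: " prefix is applied in a single place.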
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index de7f987cf611..4ccb4be4c9cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -83,15 +83,15 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
static void __confirm_options(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
- drm_dbg(&i915->drm,
- "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
- i915->params.enable_guc,
- str_yes_no(intel_uc_wants_guc(uc)),
- str_yes_no(intel_uc_wants_guc_submission(uc)),
- str_yes_no(intel_uc_wants_huc(uc)),
- str_yes_no(intel_uc_wants_guc_slpc(uc)));
+ gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
+ i915->params.enable_guc,
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -102,26 +102,22 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC submission is N/A");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC submission is N/A");
if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "undocumented flag");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -143,6 +139,7 @@ void intel_uc_init_early(struct intel_uc *uc)
void intel_uc_init_late(struct intel_uc *uc)
{
intel_guc_init_late(&uc->guc);
+ intel_gsc_uc_load_start(&uc->gsc);
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -535,8 +532,11 @@ static int __uc_init_hw(struct intel_uc *uc)
else
intel_huc_auth(huc);
- if (intel_uc_uses_guc_submission(uc))
- intel_guc_submission_enable(guc);
+ if (intel_uc_uses_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_log_capture;
+ }
if (intel_uc_uses_guc_slpc(uc)) {
ret = intel_guc_slpc_enable(&guc->slpc);
@@ -547,12 +547,8 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- intel_gsc_uc_load_start(&uc->gsc);
-
- gt_info(gt, "GuC submission %s\n",
- str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
- gt_info(gt, "GuC SLPC %s\n",
- str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+ guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
@@ -678,7 +674,7 @@ void intel_uc_suspend(struct intel_uc *uc)
int err;
/* flush the GSC worker */
- intel_gsc_uc_suspend(&uc->gsc);
+ intel_gsc_uc_flush_work(&uc->gsc);
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
@@ -720,6 +716,8 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return err;
}
+ intel_gsc_uc_resume(&uc->gsc);
+
return 0;
}
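intel_guc_submission_enable() above (fail_stats/fail_sem) and __uc_init_hw() here (err_log_capture) both use the kernel's goto-unwind convention: acquire in order, release in reverse, one label per partially-constructed state. A compact sketch of the shape (the setup/teardown names are illustrative):

#include <linux/errno.h>

static int setup_a(void);
static int setup_b(void);
static void teardown_a(void);

static int enable_everything(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b();
	if (ret)
		goto fail_a;	/* undo only what already succeeded */

	return 0;

fail_a:
	teardown_a();
	return ret;
}

The conversion of intel_guc_submission_enable() from void to int exists precisely so this unwinding can propagate: a failed guc_init_engine_stats() now routes the semaphore interrupts back to the host instead of leaving the GT half-enabled.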
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 65672ff82605..264c952f777b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_print.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -44,11 +45,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -562,15 +562,14 @@ static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = gt->i915;
struct uc_css_header *css;
size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -580,10 +579,9 @@ static int check_ccs_header(struct intel_gt *gt,
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm,
- "%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -596,18 +594,18 @@ static int check_ccs_header(struct intel_gt *gt,
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, size);
return -ENOEXEC;
}
/* Sanity check that this fw is not larger than the whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= gt->wopcm.size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- size, (size_t)gt->wopcm.size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, (size_t)gt->wopcm.size);
return -E2BIG;
}
@@ -635,20 +633,20 @@ static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
*/
if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_selected.ver.major,
- uc_fw->file_selected.ver.minor,
- uc_fw->file_selected.ver.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
return false;
}
if (!is_ver_8bit(&guc->submission_version)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- guc->submission_version.major,
- guc->submission_version.minor,
- guc->submission_version.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ guc->submission_version.major,
+ guc->submission_version.minor,
+ guc->submission_version.patch);
return false;
}
@@ -687,10 +685,9 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
return err;
if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&gt->i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+ gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
/* try to find another blob to load */
release_firmware(*fw);
@@ -768,10 +765,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
+ gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
@@ -786,16 +783,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
- drm_notice(&i915->drm,
- "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_wanted.path,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
- uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
+ gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
+ gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
}
if (HAS_LMEM(i915)) {
@@ -823,10 +818,10 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
- drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+ gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
+ gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
return err;
@@ -932,9 +927,9 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uncore_read_fw(uncore, DMA_CTRL));
+ gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
@@ -950,9 +945,8 @@ int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
- i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- err);
+ gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -1078,15 +1072,15 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out;
}
err = uc_fw_rsa_data_create(uc_fw);
if (err) {
- DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out_unpin;
}
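/*
 * [Editorial aside, not part of the patch] The conversions above swap
 * plain "%d" errno prints for "%pe", which decodes an ERR_PTR()-encoded
 * pointer into a symbolic errno name (with CONFIG_SYMBOLIC_ERRNAME; it
 * falls back to the numeric code otherwise). A minimal sketch of the
 * idiom, assuming only <linux/err.h> and <linux/printk.h>:
 *
 *	int err = -ENOEXEC;
 *
 *	pr_err("fetch failed with error %d\n", err);	// "... error -8"
 *	pr_err("fetch failed %pe\n", ERR_PTR(err));	// "... -ENOEXEC"
 *
 * Values that already are error pointers (a failed intel_context_create(),
 * for instance) can be handed to %pe directly, which is why several call
 * sites below drop the intermediate PTR_ERR()/ERR_PTR() round trip.
 */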
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index e28518fe8b90..1fd760539f77 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
+#include "intel_guc_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -65,7 +67,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
goto err;
}
@@ -86,7 +88,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
goto err;
}
@@ -96,7 +98,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
i915_request_put(last[i]);
@@ -113,7 +115,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -153,7 +155,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- drm_err(&gt->i915->drm, "Context array allocation failed\n");
+ guc_err(guc, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -166,25 +168,25 @@ static int intel_guc_steal_guc_ids(void *arg)
ce[context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ guc_err(guc, "Failed to add spinner request: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -192,9 +194,9 @@ static int intel_guc_steal_guc_ids(void *arg)
while (ret != -EAGAIN) {
ce[++context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
- ret = PTR_ERR(ce[context_index--]);
- ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
+ ce[context_index--] = NULL;
goto err_spin_rq;
}
@@ -203,8 +205,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
- context_index, ret);
+ guc_err(guc, "Failed to create request %d: %pe\n",
+ context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
@@ -218,7 +220,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
+ guc_err(guc, "Spin request failed to complete: %pe\n", ERR_PTR(ret));
i915_request_put(last);
goto err_spin_rq;
}
@@ -230,7 +232,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ guc_err(guc, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -238,7 +240,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
+ guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
goto err_spin_rq;
}
@@ -246,20 +248,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
+ guc_err(guc, "Request with stolen guc_id failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ guc_err(guc, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- drm_err(&gt->i915->drm, "No guc_id was stolen");
+ guc_err(guc, "No guc_id was stolen\n");
ret = -EINVAL;
} else {
ret = 0;
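/*
 * [Editorial aside, not part of the patch] Beyond the guc_err()
 * conversion, the -EAGAIN loop above fixes a slot-bookkeeping bug: the
 * old code applied the post-decrement before NULLing, so it wiped the
 * previous (valid) context and left the ERR_PTR() in the failing slot
 * for the cleanup path to trip over. The two orderings side by side:
 *
 *	// before: decrement lands first, the wrong slot is cleared
 *	ret = PTR_ERR(ce[context_index--]);
 *	ce[context_index] = NULL;		// clears ce[index - 1]
 *
 *	// after: log the failing slot, then clear it and step back
 *	ret = PTR_ERR(ce[context_index]);
 *	ce[context_index--] = NULL;		// clears ce[index]
 */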
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index d91b58f70403..34b5d952e2bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -3,6 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -45,7 +46,7 @@ static int intel_hang_guc(void *arg)
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
- drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ gt_err(gt, "Failed to get kernel context: %pe\n", ctx);
return PTR_ERR(ctx);
}
@@ -54,7 +55,7 @@ static int intel_hang_guc(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create context: %pe\n", ce);
goto err;
}
@@ -63,13 +64,13 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ gt_err(gt, "Failed to boost heartbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -77,28 +78,28 @@ static int intel_hang_guc(void *arg)
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ gt_err(gt, "Failed to add spinner request: %pe\n", ERR_PTR(ret));
goto err_spin;
}
ret = intel_reset_guc(gt);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC: %pe\n", ERR_PTR(ret));
goto err_spin;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ gt_err(gt, "Failed to reset GuC: status = 0x%08X\n", guc_status);
ret = -EIO;
goto err_spin;
}
@@ -107,12 +108,12 @@ static int intel_hang_guc(void *arg)
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin;
}
if (i915_reset_count(global) == reset_count) {
- drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ gt_err(gt, "Failed to record a GPU reset\n");
ret = -EINVAL;
goto err_spin;
}
@@ -132,7 +133,7 @@ err_spin:
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ gt_err(gt, "No-op failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
}
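/*
 * [Editorial aside, not part of the patch] Note that the selftest does
 * not trust intel_reset_guc()'s return value alone: it samples GUC_STATUS
 * and requires GS_MIA_IN_RESET, confirming the microcontroller really
 * latched into reset. A hedged sketch of that check, reusing the
 * register definitions visible above:
 *
 *	u32 status = intel_uncore_read(gt->uncore, GUC_STATUS);
 *
 *	if (!(status & GS_MIA_IN_RESET))
 *		return -EIO;	// reset requested but never took effect
 */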
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index d17982c36d25..a40e7c32e613 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -115,30 +116,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
+ gt_err(gt, "Failed creating contexts: %pe\n", parent);
return PTR_ERR(parent);
} else if (!parent) {
- drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
+ gt_dbg(gt, "Not enough engines in class: %d\n", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
+ gt_err(gt, "Failed creating requests: %pe\n", rq);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
+ gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret));
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
}
out:
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7412abf166a8..8ef93889061a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -422,12 +421,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
* we can use it to substitute for the pending idle-barrier
* request that we want to emit on the kernel_context.
*/
- __active_del_barrier(ref, node_from_active(active));
- return true;
+ return __active_del_barrier(ref, node_from_active(active));
}
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ u64 idx = i915_request_timeline(rq)->fence_context;
struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -437,16 +436,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
if (err)
return err;
- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
- if (!active) {
- err = -ENOMEM;
- goto out;
- }
+ do {
+ active = active_instance(ref, idx);
+ if (!active) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ } while (unlikely(is_barrier(active)));
- if (replace_barrier(ref, active)) {
- RCU_INIT_POINTER(active->fence, NULL);
- atomic_dec(&ref->count);
- }
if (!__i915_active_fence_set(active, fence))
__i915_active_acquire(ref);
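/*
 * [Editorial aside, not part of the patch] Two coupled changes here:
 * replace_barrier() now propagates __active_del_barrier()'s result
 * instead of assuming the deletion succeeded, and the caller re-looks
 * the node up in a do/while loop until the slot it gets back is no
 * longer a barrier. The retry shape, condensed:
 *
 *	do {
 *		active = active_instance(ref, idx);
 *		if (!active)
 *			return -ENOMEM;
 *
 *		if (replace_barrier(ref, active)) {
 *			// stole the barrier's slot, drop its reference
 *			RCU_INIT_POINTER(active->fence, NULL);
 *			atomic_dec(&ref->count);
 *		}
 *	} while (unlikely(is_barrier(active)));
 *
 * A hedged reading: if __active_del_barrier() loses the race with the
 * engine consuming that barrier, is_barrier() remains true and the loop
 * looks the node up again instead of reusing a node still on the
 * barrier list.
 */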
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index db7a86def7e2..a53fd339e2cc 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -483,13 +483,17 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_memory_regions_hw_probe(dev_priv);
+ /*
+ * Make sure we probe lmem before we probe stolen-lmem. The BAR size
+ * might be different due to bar resizing.
+ */
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_ggtt;
- ret = intel_gt_tiles_init(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
- goto err_mem_regions;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
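/*
 * [Editorial aside, not part of the patch] The swap above implements the
 * new comment: lmem must be probed (and any BAR resize absorbed) before
 * the stolen portion is sized from the BAR. The error label moves with
 * it: with intel_memory_regions_hw_probe() now the later step, a failure
 * in intel_gt_tiles_init() has no probed regions to unwind, so both
 * paths can share err_ggtt.
 */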
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 904f21e1380c..f020c0086fbc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -505,6 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
ctx->total_runtime, ctx->avg_runtime);
+ err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno);
}
static struct i915_vma_coredump *
@@ -1395,6 +1396,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+ e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
+ *ce->timeline->hwsp_seqno : ~0U;
e->total_runtime = intel_context_get_total_runtime_ns(ce);
e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
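/*
 * [Editorial aside, not part of the patch] hwsp_seqno is a pointer into
 * the timeline's hardware status page, hence the double guard before the
 * dereference; ~0U serves as an "unknown" sentinel so error_print_context()
 * can always emit a value, even for contexts captured without a timeline.
 */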
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 56027ffbce51..a91932cc6531 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -107,6 +107,7 @@ struct intel_engine_coredump {
int active;
int guilty;
struct i915_sched_attr sched_attr;
+ u32 hwsp_seqno;
} context;
struct i915_vma_coredump *vma;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1757fb8fdf5b..34b7d2ac33fb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6458,15 +6458,6 @@
/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
#define PCODE_MBOX_DOMAIN_NONE 0x0
#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
-
-/* Wa_14017210380: mtl */
-#define PCODE_MBOX_GT_STATE 0x50
-/* sub-commands (param1) */
-#define PCODE_MBOX_GT_STATE_MEDIA_BUSY 0x1
-#define PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
-/* param2 */
-#define PCODE_MBOX_GT_STATE_DOMAIN_MEDIA 0x1
-
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a72698a2dbc8..a1bc804cfa15 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -139,13 +139,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
mutex_unlock(&bman->lock);
- if (place->lpfn - place->fpfn == n_pages)
- bman_res->base.start = place->fpfn;
- else if (lpfn <= bman->visible_size)
- bman_res->base.start = 0;
- else
- bman_res->base.start = bman->visible_size;
-
*res = &bman_res->base;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index a234d9b4ed14..3db2ba439bb5 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -204,15 +204,42 @@ out:
#undef COND
}
+static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
+{
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ 500, timeout_ms,
+ NULL))
+ return -EPROBE_DEFER;
+
+ return skl_pcode_request(uncore,
+ DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
+}
+
int intel_pcode_init(struct intel_uncore *uncore)
{
+ int err;
+
if (!IS_DGFX(uncore->i915))
return 0;
- return skl_pcode_request(uncore, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ /*
+ * Wait 10 seconds for the punit to settle and complete any
+ * outstanding transactions upon module load.
+ */
+ err = pcode_init_wait(uncore, 10000);
+
+ if (err) {
+ drm_notice(&uncore->i915->drm,
+ "Waiting for HW initialisation...\n");
+ err = pcode_init_wait(uncore, 180000);
+ }
+
+ return err;
}
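/*
 * [Editorial aside, not part of the patch] intel_pcode_init() now waits
 * in two stages: a quiet 10 s attempt, then a user-visible notice
 * followed by the original 180 s wait. The factored-out pcode_init_wait()
 * also checks that the mailbox itself is idle (GEN6_PCODE_READY clear)
 * before querying init status, mapping a still-busy mailbox to
 * -EPROBE_DEFER so the probe is retried rather than failed. The call
 * shape, condensed:
 *
 *	err = pcode_init_wait(uncore, 10 * MSEC_PER_SEC);
 *	if (err) {
 *		drm_notice(&uncore->i915->drm, "Waiting for HW initialisation...\n");
 *		err = pcode_init_wait(uncore, 180 * MSEC_PER_SEC);
 *	}
 */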
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f4b3b2063018..e1e1f34490c8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2748,14 +2748,25 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Trigger the actual Driver-FLR */
intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
+ /* Wait for hardware teardown to complete */
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL,
+ DRIVERFLR, 0,
+ flr_timeout_ms);
+ if (ret) {
+ drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
+ return;
+ }
+
+ /* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout_ms);
if (ret) {
- drm_err(&i915->drm, "wait for Driver-FLR completion failed! %d\n", ret);
+ drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
}
+ /* Clear sticky completion status */
intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
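/*
 * [Editorial aside, not part of the patch] The driver-initiated FLR now
 * waits for two distinct events instead of one, then acknowledges the
 * sticky status bit. In outline:
 *
 *	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);	// trigger
 *	// 1) GU_CNTL.DRIVERFLR clears: hardware teardown done
 *	// 2) GU_DEBUG.DRIVERFLR_STATUS sets: hw/fw re-init done
 *	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS); // ack
 *
 * Waiting only for the status bit risked sampling a stale, sticky
 * DRIVERFLR_STATUS left over from an earlier FLR; the added teardown
 * wait closes that window.
 */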
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
index 64609d1b1c0f..23431c36b60b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -38,7 +38,7 @@ int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH;
huc_in.header.status = 0;
huc_in.header.buffer_len = sizeof(huc_in.huc_base_address);
- huc_in.huc_base_address = huc_phys_addr;
+ huc_in.huc_base_address = cpu_to_le64(huc_phys_addr);
err = intel_pxp_tee_stream_message(pxp, client_id, fence_id,
&huc_in, sizeof(huc_in),
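/*
 * [Editorial aside, not part of the patch] huc_base_address travels in a
 * message consumed by firmware with a fixed little-endian layout, so the
 * raw store becomes cpu_to_le64(): a no-op on little-endian hosts, a
 * byte swap on big-endian ones. If the field is (or becomes) __le64 in
 * the wire struct, sparse can also type-check the assignment:
 *
 *	__le64 huc_base_address;	// in the message struct
 *	huc_in.huc_base_address = cpu_to_le64(huc_phys_addr);
 */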
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index aaf8a380e5c7..5aee6c9a8295 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -25,6 +25,7 @@ selftest(gt_lrc, intel_lrc_live_selftests)
selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
+selftest(gt_tlb, intel_tlb_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(migrate, intel_migrate_live_selftests)
selftest(active, i915_active_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6fe22b096bdd..a9b79888c193 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -957,18 +957,18 @@ static int live_cancel_request(void *arg)
return 0;
}
-static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+static struct i915_vma *empty_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -979,15 +979,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
- vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;
@@ -1005,6 +1005,14 @@ err:
return ERR_PTR(err);
}
+static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
+{
+ return rq->engine->emit_bb_start(rq,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
+ 0);
+}
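/*
 * [Editorial aside, not part of the patch] The new emit_bb_start()
 * helper folds the repeated four-argument engine->emit_bb_start() calls
 * in this file into one place, always dispatching with flags == 0.
 * Together with empty_batch() now pinning with plain PIN_USER in the
 * GT's own VM (rather than PIN_USER | PIN_GLOBAL in the GGTT), a hedged
 * reading is that the batches no longer need secure dispatch, which is
 * what lets each one be created per-GT below instead of once per device.
 */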
+
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
@@ -1016,10 +1024,7 @@ empty_request(struct intel_engine_cs *engine,
if (IS_ERR(request))
return request;
- err = engine->emit_bb_start(request,
- i915_vma_offset(batch),
- i915_vma_size(batch),
- I915_DISPATCH_SECURE);
+ err = emit_bb_start(request, batch);
if (err)
goto out_request;
@@ -1034,8 +1039,7 @@ static int live_empty_request(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct i915_vma *batch;
- int err = 0;
+ int err;
/*
* Submit various sized batches of empty requests, to each engine
@@ -1043,16 +1047,17 @@ static int live_empty_request(void *arg)
* the overhead of submitting requests to the hardware.
*/
- batch = empty_batch(i915);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
+ struct i915_vma *batch;
unsigned long n, prime;
ktime_t times[2] = {};
+ batch = empty_batch(engine->gt);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_batch;
@@ -1100,27 +1105,29 @@ static int live_empty_request(void *arg)
engine->name,
ktime_to_ns(times[0]),
prime, div64_u64(ktime_to_ns(times[1]), prime));
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ if (err)
+ break;
}
-out_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
return err;
}
-static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+static struct i915_vma *recursive_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
- const int ver = GRAPHICS_VER(i915);
+ const int ver = GRAPHICS_VER(gt->i915);
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1152,7 +1159,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
return vma;
@@ -1186,7 +1193,6 @@ static int live_all_engines(void *arg)
struct intel_engine_cs *engine;
struct i915_request **request;
struct igt_live_test t;
- struct i915_vma *batch;
unsigned int idx;
int err;
@@ -1204,42 +1210,44 @@ static int live_all_engines(void *arg)
if (err)
goto out_free;
- batch = recursive_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_free;
- }
-
- i915_vma_lock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(engine->gt);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ i915_vma_lock(batch);
request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
- goto out_request;
+ goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
+out_unlock:
+ i915_vma_unlock(batch);
+ if (err)
+ goto out_request;
}
- i915_vma_unlock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
if (i915_request_completed(request[idx])) {
@@ -1251,17 +1259,23 @@ static int live_all_engines(void *arg)
idx++;
}
- err = recursive_batch_resolve(batch);
- if (err) {
- pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
- goto out_request;
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ err = recursive_batch_resolve(request[idx]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+ idx++;
}
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq = request[idx];
long timeout;
- timeout = i915_request_wait(request[idx], 0,
+ timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1270,8 +1284,10 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[idx]));
- i915_request_put(request[idx]);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
request[idx] = NULL;
idx++;
}
@@ -1281,12 +1297,18 @@ static int live_all_engines(void *arg)
out_request:
idx = 0;
for_each_uabi_engine(engine, i915) {
- if (request[idx])
- i915_request_put(request[idx]);
+ struct i915_request *rq = request[idx];
+
+ if (!rq)
+ continue;
+
+ if (rq->batch) {
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ }
+ i915_request_put(rq);
idx++;
}
- i915_vma_unpin(batch);
- i915_vma_put(batch);
out_free:
kfree(request);
return err;
@@ -1322,7 +1344,7 @@ static int live_sequential_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
- batch = recursive_batch(i915);
+ batch = recursive_batch(engine->gt);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
@@ -1338,6 +1360,7 @@ static int live_sequential_engines(void *arg)
__func__, engine->name, err);
goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
if (prev) {
err = i915_request_await_dma_fence(request[idx],
@@ -1353,10 +1376,7 @@ static int live_sequential_engines(void *arg)
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;