Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_lrc.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/selftest_lrc.c	329
1 file changed, 236 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 3119016d9910..23f04f6f8fba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -5,6 +5,9 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_internal.h"
+
+#include "i915_drv.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -25,6 +28,9 @@
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+#define LRI_HEADER MI_INSTR(0x22, 0)
+#define LRI_LENGTH_MASK GENMASK(7, 0)
+
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
@@ -49,7 +55,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
unsigned long timeout)
{
/* Ignore our own attempts to suppress excess tasklets */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
timeout += jiffies;
do {
@@ -58,7 +64,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) /* that was quick! */
return 0;
- /* Wait until the HW has acknowleged the submission (or err) */
+ /* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
@@ -126,6 +132,27 @@ static int context_flush(struct intel_context *ce, long timeout)
return err;
}
+static int get_lri_mask(struct intel_engine_cs *engine, u32 lri)
+{
+ if ((lri & MI_LRI_LRM_CS_MMIO) == 0)
+ return ~0u;
+
+ if (GRAPHICS_VER(engine->i915) < 12)
+ return 0xfff;
+
+ switch (engine->class) {
+ default:
+ case RENDER_CLASS:
+ case COMPUTE_CLASS:
+ return 0x07ff;
+ case COPY_ENGINE_CLASS:
+ return 0x0fff;
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ return 0x3fff;
+ }
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -153,8 +180,8 @@ static int live_lrc_layout(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -165,6 +192,7 @@ static int live_lrc_layout(void *arg)
dw = 0;
do {
u32 lri = READ_ONCE(hw[dw]);
+ u32 lri_mask;
if (lri == 0) {
dw++;
@@ -178,7 +206,7 @@ static int live_lrc_layout(void *arg)
continue;
}
- if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
err = -EINVAL;
@@ -192,6 +220,18 @@ static int live_lrc_layout(void *arg)
break;
}
+ /*
+ * When bit 19 of MI_LOAD_REGISTER_IMM instruction
+ * opcode is set on Gen12+ devices, HW does not
+ * care about certain register address offsets, and
+ * instead checks the following for valid address
+ * ranges on specific engines:
+ * RCS && CCS: BITS(0 - 10)
+ * BCS: BITS(0 - 11)
+ * VECS && VCS: BITS(0 - 13)
+ */
+ lri_mask = get_lri_mask(engine, lri);
+
lri &= 0x7f;
lri++;
dw++;
@@ -199,7 +239,7 @@ static int live_lrc_layout(void *arg)
while (lri) {
u32 offset = READ_ONCE(hw[dw]);
- if (offset != lrc[dw]) {
+ if ((offset ^ lrc[dw]) & lri_mask) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
@@ -321,6 +361,11 @@ static int live_lrc_fixed(void *arg)
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
+ {
+ i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
+ lrc_ring_bb_offset(engine),
+ "RING_BB_OFFSET"
+ },
{ },
}, *t;
u32 *hw;
@@ -329,8 +374,8 @@ static int live_lrc_fixed(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -408,9 +453,7 @@ retry:
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
- err = i915_request_await_object(rq, scratch->obj, true);
- if (!err)
- err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
@@ -557,11 +600,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
*cs++ = 0;
}
- i915_vma_lock(scratch);
- err = i915_request_await_object(rq, scratch->obj, true);
- if (!err)
- err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
+ err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
@@ -821,6 +860,14 @@ static int live_lrc_timestamp(void *arg)
};
/*
+ * This test was designed to isolate a hardware bug.
+ * The bug was found and fixed in future generations but
+ * now the test pollutes our CI on the previous generation.
+ */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ return 0;
+
+ /*
* We want to verify that the timestamp is saved and restored across
* context switches and is monotonic.
*
@@ -909,6 +956,19 @@ create_user_vma(struct i915_address_space *vm, unsigned long size)
return vma;
}
+static u32 safe_poison(u32 offset, u32 poison)
+{
+ /*
+ * Do not enable predication as it will nop all subsequent commands,
+ * not only disabling the tests (by preventing all the other SRM) but
+ * also preventing the arbitration events at the end of the request.
+ */
+ if (offset == i915_mmio_reg_offset(RING_PREDICATE_RESULT(0)))
+ poison &= ~REG_BIT(0);
+
+ return poison;
+}
+
static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
@@ -938,25 +998,47 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /*
+ * Keep it simple, skip parsing complex commands
+ *
+ * At present, there are no more MI_LOAD_REGISTER_IMM
+ * commands after the first 3D state command. Rather
+ * than include a table (see i915_cmd_parser.c) of all
+ * the possible commands and their instruction lengths
+ * (or mask for variable length instructions), assume
+ * we have gathered the complete list of registers and
+ * bail out.
+ */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ /* Assume all other MI commands match LRI length mask */
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = hw[dw];
- *cs++ = lower_32_bits(scratch->node.start + x);
- *cs++ = upper_32_bits(scratch->node.start + x);
+ *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+ *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
dw += 2;
x += 4;
@@ -974,21 +1056,6 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
return batch;
}
-static int move_to_active(struct i915_request *rq,
- struct i915_vma *vma,
- unsigned int flags)
-{
- int err;
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, flags);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, flags);
- i915_vma_unlock(vma);
-
- return err;
-}
-
static struct i915_request *
record_registers(struct intel_context *ce,
struct i915_vma *before,
@@ -1014,19 +1081,19 @@ record_registers(struct intel_context *ce,
if (IS_ERR(rq))
goto err_after;
- err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_rq;
- err = move_to_active(rq, b_before, 0);
+ err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
if (err)
goto err_rq;
- err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_rq;
- err = move_to_active(rq, b_after, 0);
+ err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
if (err)
goto err_rq;
@@ -1038,8 +1105,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_before->node.start);
- *cs++ = upper_32_bits(b_before->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_before));
+ *cs++ = upper_32_bits(i915_vma_offset(b_before));
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
@@ -1054,8 +1121,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_after->node.start);
- *cs++ = upper_32_bits(b_after->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_after));
+ *cs++ = upper_32_bits(i915_vma_offset(b_after));
intel_ring_advance(rq, cs);
@@ -1101,24 +1168,37 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
while (len--) {
*cs++ = hw[dw];
- *cs++ = poison;
+ *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
+ MI_LRI_LRM_CS_MMIO),
+ poison);
dw += 2;
}
} while (dw < PAGE_SIZE / sizeof(u32) &&
@@ -1151,7 +1231,7 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
goto err_batch;
}
- err = move_to_active(rq, batch, 0);
+ err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto err_rq;
@@ -1163,8 +1243,8 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(batch->node.start);
- *cs++ = upper_32_bits(batch->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(batch));
+ *cs++ = upper_32_bits(i915_vma_offset(batch));
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
@@ -1221,9 +1301,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915,
- ce->state->obj,
- false));
+ intel_gt_coherent_map_type(engine->gt,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
@@ -1241,18 +1321,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1292,6 +1383,30 @@ err_A0:
return err;
}
+static struct i915_vma *
+create_result_vma(struct i915_address_space *vm, unsigned long sz)
+{
+ struct i915_vma *vma;
+ void *ptr;
+
+ vma = create_user_vma(vm, sz);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Set the results to a known value distinct from the poison */
+ ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ i915_vma_put(vma);
+ return ERR_CAST(ptr);
+ }
+
+ memset(ptr, POISON_INUSE, vma->size);
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return vma;
+}
+
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
@@ -1310,13 +1425,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
goto err_A;
}
- ref[0] = create_user_vma(A->vm, SZ_64K);
+ ref[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[0])) {
err = PTR_ERR(ref[0]);
goto err_B;
}
- ref[1] = create_user_vma(A->vm, SZ_64K);
+ ref[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[1])) {
err = PTR_ERR(ref[1]);
goto err_ref0;
@@ -1338,13 +1453,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
i915_request_put(rq);
- result[0] = create_user_vma(A->vm, SZ_64K);
+ result[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[0])) {
err = PTR_ERR(result[0]);
goto err_ref1;
}
- result[1] = create_user_vma(A->vm, SZ_64K);
+ result[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[1])) {
err = PTR_ERR(result[1]);
goto err_result0;
@@ -1357,18 +1472,17 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
err = poison_registers(B, poison, sema);
- if (err) {
- WRITE_ONCE(*sema, -1);
- i915_request_put(rq);
- goto err_result1;
- }
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- i915_request_put(rq);
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("%s(%s): wait for results timed out\n",
+ __func__, engine->name);
err = -ETIME;
- goto err_result1;
}
+
+ /* Always cancel the semaphore wait, just in case the GPU gets stuck */
+ WRITE_ONCE(*sema, -1);
i915_request_put(rq);
+ if (err)
+ goto err_result1;
err = compare_isolation(engine, ref, result, A, poison);
@@ -1450,7 +1564,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
-static int indirect_ctx_submit_req(struct intel_context *ce)
+static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@@ -1474,7 +1588,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
-emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+emit_wabb_ctx_canary(const struct intel_context *ce,
+ u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@@ -1482,26 +1597,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
- CTX_BB_CANARY_OFFSET;
+ CTX_BB_CANARY_OFFSET +
+ (per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, false);
+}
+
+static u32 *
+emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, true);
+}
+
static void
-indirect_ctx_bb_setup(struct intel_context *ce)
+wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
- u32 *cs = context_indirect_bb(ce);
+ u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
- setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+ if (per_ctx)
+ setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
+ else
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
-static bool check_ring_start(struct intel_context *ce)
+static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
- LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
+ (per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@@ -1513,21 +1645,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
-static int indirect_ctx_bb_check(struct intel_context *ce)
+static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
- err = indirect_ctx_submit_req(ce);
+ err = wabb_ctx_submit_req(ce);
if (err)
return err;
- if (!check_ring_start(ce))
+ if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
-static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@@ -1562,14 +1694,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As ring start is restored prior to starting the indirect ctx bb and
* as it will be different for each context, it fits this purpose.
*/
- indirect_ctx_bb_setup(a);
- indirect_ctx_bb_setup(b);
+ wabb_ctx_setup(a, per_ctx);
+ wabb_ctx_setup(b, per_ctx);
- err = indirect_ctx_bb_check(a);
+ err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
- err = indirect_ctx_bb_check(b);
+ err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@@ -1583,7 +1715,7 @@ put_a:
return err;
}
-static int live_lrc_indirect_ctx_bb(void *arg)
+static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -1592,7 +1724,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
- err = __live_lrc_indirect_ctx_bb(engine);
+ err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@@ -1605,6 +1737,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, false);
+}
+
+static int live_lrc_per_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, true);
+}
+
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -1613,12 +1755,12 @@ static void garbage_reset(struct intel_engine_cs *engine,
local_bh_disable();
if (!test_and_set_bit(bit, lock)) {
- tasklet_disable(&engine->execlists.tasklet);
+ tasklet_disable(&engine->sched_engine->tasklet);
if (!rq->fence.error)
__intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&engine->sched_engine->tasklet);
clear_and_wake_up_bit(bit, lock);
}
local_bh_enable();
@@ -1751,8 +1893,8 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
- ce->runtime.num_underflow = 0;
- ce->runtime.max_underflow = 0;
+ ce->stats.runtime.num_underflow = 0;
+ ce->stats.runtime.max_underflow = 0;
do {
unsigned int loop = 1024;
@@ -1790,11 +1932,11 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
intel_context_get_avg_runtime_ns(ce));
err = 0;
- if (ce->runtime.num_underflow) {
+ if (ce->stats.runtime.num_underflow) {
pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
engine->name,
- ce->runtime.num_underflow,
- ce->runtime.max_underflow);
+ ce->stats.runtime.num_underflow,
+ ce->stats.runtime.max_underflow);
GEM_TRACE_DUMP();
err = -EOVERFLOW;
}
@@ -1842,10 +1984,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
+ SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
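
For reference, below is a minimal standalone sketch of the context-image walk that store_context(), load_context() and compare_isolation() share after this patch: skip noop padding, stop at the first non-MI command, step over other MI commands by their length field, and expand each MI_LOAD_REGISTER_IMM into (offset, value) pairs. It is illustrative only and not part of the diff; the constants are restated here on the assumption that they match the i915 definitions, and the on_register() callback is a hypothetical stand-in for whatever each caller emits per register.

/*
 * Illustrative sketch only (not part of the patch above).
 * Assumed constants restated from the diff; on_register() is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

#define LRI_HEADER		(0x22u << 23)	/* MI_INSTR(0x22, 0) */
#define LRI_HEADER_MASK		0xff800000u	/* GENMASK(31, 23) */
#define LRI_LENGTH_MASK		0xffu		/* GENMASK(7, 0) */
#define INSTR_CLIENT_SHIFT	29
#define INSTR_MI_CLIENT		0x0u

static void walk_lri(const uint32_t *hw, size_t ndw,
		     void (*on_register)(uint32_t offset, uint32_t value))
{
	size_t dw = 0;

	while (dw < ndw) {
		uint32_t header = hw[dw];
		uint32_t len = header & LRI_LENGTH_MASK;

		/* Stop at the first non-MI (complex) command, e.g. 3D state */
		if ((header >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
			break;

		/* Skip the noop padding between packets */
		if (header == 0) {
			dw++;
			continue;
		}

		/* Other MI commands: assume they share the 8-bit length field */
		if ((header & LRI_HEADER_MASK) != LRI_HEADER) {
			dw += len + 2;
			continue;
		}

		/* A zero-length LRI means the image is corrupt */
		if (!len)
			break;

		/* The LRI payload is a sequence of (offset, value) dword pairs */
		dw++;
		for (len = (len + 1) / 2; len--; dw += 2)
			on_register(hw[dw], hw[dw + 1]);
	}
}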