Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_mocs.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c  184
1 file changed, 103 insertions, 81 deletions
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 8831ffee2061..d73e438fb85f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -1,23 +1,40 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2019 Intel Corporation
  */
 
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
 #include "i915_selftest.h"
 
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
 
 struct live_mocs {
-	struct drm_i915_mocs_table mocs;
-	struct drm_i915_mocs_table l3cc;
+	struct drm_i915_mocs_table table;
+	struct drm_i915_mocs_table *mocs;
+	struct drm_i915_mocs_table *l3cc;
 	struct i915_vma *scratch;
 	void *vaddr;
 };
 
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	/* We build large requests to read the registers from the ring */
+	ce->ring_size = SZ_16K;
+
+	return ce;
+}
+
 static int request_add_sync(struct i915_request *rq, int err)
 {
 	i915_request_get(rq);
@@ -42,56 +59,29 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
 	return err;
 }
 
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err) {
-		i915_gem_object_put(obj);
-		return ERR_PTR(err);
-	}
-
-	return vma;
-}
-
 static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
 {
-	struct drm_i915_mocs_table table;
 	unsigned int flags;
 	int err;
 
 	memset(arg, 0, sizeof(*arg));
 
-	flags = get_mocs_settings(gt->i915, &table);
+	flags = get_mocs_settings(gt->i915, &arg->table);
 	if (!flags)
 		return -EINVAL;
 
 	if (flags & HAS_RENDER_L3CC)
-		arg->l3cc = table;
+		arg->l3cc = &arg->table;
 
 	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
-		arg->mocs = table;
+		arg->mocs = &arg->table;
 
-	arg->scratch = create_scratch(gt);
+	arg->scratch =
+		__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
 	if (IS_ERR(arg->scratch))
 		return PTR_ERR(arg->scratch);
 
-	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+	arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
 	if (IS_ERR(arg->vaddr)) {
 		err = PTR_ERR(arg->vaddr);
 		goto err_scratch;
@@ -111,7 +101,7 @@ static void live_mocs_fini(struct live_mocs *arg)
 
 static int read_regs(struct i915_request *rq,
 		     u32 addr, unsigned int count,
-		     uint32_t *offset)
+		     u32 *offset)
 {
 	unsigned int i;
 	u32 *cs;
@@ -139,12 +129,16 @@ static int read_mocs_table(struct i915_request *rq,
 			   const struct drm_i915_mocs_table *table,
-			   uint32_t *offset)
+			   u32 *offset)
 {
+	struct intel_gt *gt = rq->engine->gt;
 	u32 addr;
 
+	if (!table)
+		return 0;
+
 	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
-		addr = global_mocs_offset();
+		addr = global_mocs_offset() + gt->uncore->gsi_offset;
 	else
 		addr = mocs_offset(rq->engine);
@@ -153,20 +147,26 @@ static int read_l3cc_table(struct i915_request *rq,
 			   const struct drm_i915_mocs_table *table,
-			   uint32_t *offset)
+			   u32 *offset)
 {
 	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
 
+	if (!table)
+		return 0;
+
 	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
 }
 
 static int check_mocs_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table,
-			    uint32_t **vaddr)
+			    u32 **vaddr)
 {
 	unsigned int i;
 	u32 expect;
 
+	if (!table)
+		return 0;
+
 	for_each_mocs(expect, table, i) {
 		if (**vaddr != expect) {
 			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
@@ -186,18 +186,21 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 	 * which only controls CPU initiated MMIO. Routing does not
 	 * work for CS access so we cannot verify them on this path.
 	 */
-	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+	return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
 }
 
 static int check_l3cc_table(struct intel_engine_cs *engine,
 			    const struct drm_i915_mocs_table *table,
-			    uint32_t **vaddr)
+			    u32 **vaddr)
 {
 	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
 	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
 	unsigned int i;
 	u32 expect;
 
+	if (!table)
+		return 0;
+
 	for_each_l3cc(expect, table, i) {
 		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
 			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
@@ -226,18 +229,14 @@ static int check_mocs_engine(struct live_mocs *arg,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, true);
-	if (!err)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
 
 	/* Read the mocs tables back using SRM */
 	offset = i915_ggtt_offset(vma);
 	if (!err)
-		err = read_mocs_table(rq, &arg->mocs, &offset);
+		err = read_mocs_table(rq, arg->mocs, &offset);
 	if (!err && ce->engine->class == RENDER_CLASS)
-		err = read_l3cc_table(rq, &arg->l3cc, &offset);
+		err = read_l3cc_table(rq, arg->l3cc, &offset);
 	offset -= i915_ggtt_offset(vma);
 	GEM_BUG_ON(offset > PAGE_SIZE);
@@ -248,9 +247,9 @@ static int check_mocs_engine(struct live_mocs *arg,
 	/* Compare the results against the expected tables */
 	vaddr = arg->vaddr;
 	if (!err)
-		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
+		err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
 	if (!err && ce->engine->class == RENDER_CLASS)
-		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
+		err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
 	if (err)
 		return err;
@@ -301,7 +300,7 @@ static int live_mocs_clean(void *arg)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			break;
 		}
@@ -318,7 +317,8 @@ static int live_mocs_clean(void *arg)
 }
 
 static int active_engine_reset(struct intel_context *ce,
-			       const char *reason)
+			       const char *reason,
+			       bool using_guc)
 {
 	struct igt_spinner spin;
 	struct i915_request *rq;
 	int err;
@@ -335,9 +335,13 @@ static int active_engine_reset(struct intel_context *ce,
 	}
 
 	err = request_add_spin(rq, &spin);
-	if (err == 0)
+	if (err == 0 && !using_guc)
 		err = intel_engine_reset(ce->engine, reason);
 
+	/* Ensure the reset happens and kills the engine */
+	if (err == 0)
+		err = intel_selftest_wait_for_rq(rq);
+
 	igt_spinner_end(&spin);
 	igt_spinner_fini(&spin);
@@ -345,31 +349,38 @@ static int __live_mocs_reset(struct live_mocs *mocs,
-			     struct intel_context *ce)
+			     struct intel_context *ce, bool using_guc)
 {
+	struct intel_gt *gt = ce->engine->gt;
 	int err;
 
-	err = intel_engine_reset(ce->engine, "mocs");
-	if (err)
-		return err;
+	if (intel_has_reset_engine(gt)) {
+		if (!using_guc) {
+			err = intel_engine_reset(ce->engine, "mocs");
+			if (err)
+				return err;
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+			err = check_mocs_engine(mocs, ce);
+			if (err)
+				return err;
+		}
 
-	err = active_engine_reset(ce, "mocs");
-	if (err)
-		return err;
+		err = active_engine_reset(ce, "mocs", using_guc);
+		if (err)
+			return err;
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+		err = check_mocs_engine(mocs, ce);
+		if (err)
+			return err;
+	}
 
-	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+	if (intel_has_gpu_reset(gt)) {
+		intel_gt_reset(gt, ce->engine->mask, "mocs");
 
-	err = check_mocs_engine(mocs, ce);
-	if (err)
-		return err;
+		err = check_mocs_engine(mocs, ce);
+		if (err)
+			return err;
+	}
 
 	return 0;
 }
@@ -384,28 +395,39 @@ static int live_mocs_reset(void *arg)
 
 	/* Check the mocs setup is retained over per-engine and global resets */
 
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
 	err = live_mocs_init(&mocs, gt);
 	if (err)
 		return err;
 
 	igt_global_reset_lock(gt);
 	for_each_engine(engine, gt, id) {
+		bool using_guc = intel_engine_uses_guc(engine);
+		struct intel_selftest_saved_policy saved;
 		struct intel_context *ce;
+		int err2;
+
+		err = intel_selftest_modify_policy(engine, &saved,
+						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+		if (err)
+			break;
 
-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
-			break;
+			goto restore;
 		}
 
 		intel_engine_pm_get(engine);
-		err = __live_mocs_reset(&mocs, ce);
-		intel_engine_pm_put(engine);
+		err = __live_mocs_reset(&mocs, ce, using_guc);
+
+		intel_engine_pm_put(engine);
 		intel_context_put(ce);
+
+restore:
+		err2 = intel_selftest_restore_policy(engine, &saved);
+		if (err == 0)
+			err = err2;
 		if (err)
 			break;
 	}
@@ -427,5 +449,5 @@ int intel_mocs_live_selftests(struct drm_i915_private *i915)
 	if (!get_mocs_settings(i915, &table))
 		return 0;
 
-	return intel_gt_live_subtests(tests, &i915->gt);
+	return intel_gt_live_subtests(tests, to_gt(i915));
 }
