Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_engine_cs.c')
 -rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 89
1 file changed, 56 insertions, 33 deletions
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index f88e445a1cae..5ffa5e30f419 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -1,11 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * SPDX-License-Identifier: GPL-2.0
- *
  * Copyright © 2018 Intel Corporation
  */
 
 #include <linux/sort.h>
 
+#include "intel_gpu_commands.h"
 #include "intel_gt_pm.h"
 #include "intel_rps.h"
 
@@ -21,26 +21,41 @@ static int cmp_u32(const void *A, const void *B)
         return *a - *b;
 }
 
-static void perf_begin(struct intel_gt *gt)
+static intel_wakeref_t perf_begin(struct intel_gt *gt)
 {
-        intel_gt_pm_get(gt);
+        intel_wakeref_t wakeref = intel_gt_pm_get(gt);
 
         /* Boost gpufreq to max [waitboost] and keep it fixed */
         atomic_inc(&gt->rps.num_waiters);
-        schedule_work(&gt->rps.work);
+        queue_work(gt->i915->unordered_wq, &gt->rps.work);
         flush_work(&gt->rps.work);
+
+        return wakeref;
 }
 
-static int perf_end(struct intel_gt *gt)
+static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
 {
         atomic_dec(&gt->rps.num_waiters);
-        intel_gt_pm_put(gt);
+        intel_gt_pm_put(gt, wakeref);
 
         return igt_flush_test(gt->i915);
 }
 
+static i915_reg_t timestamp_reg(struct intel_engine_cs *engine)
+{
+        struct drm_i915_private *i915 = engine->i915;
+
+        if (GRAPHICS_VER(i915) == 5 || IS_G4X(i915))
+                return RING_TIMESTAMP_UDW(engine->mmio_base);
+        else
+                return RING_TIMESTAMP(engine->mmio_base);
+}
+
 static int write_timestamp(struct i915_request *rq, int slot)
 {
+        struct intel_timeline *tl =
+                rcu_dereference_protected(rq->timeline,
+                                          !i915_request_signaled(rq));
         u32 cmd;
         u32 *cs;
 
@@ -49,11 +64,11 @@ static int write_timestamp(struct i915_request *rq, int slot)
                 return PTR_ERR(cs);
 
         cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
-        if (INTEL_GEN(rq->i915) >= 8)
+        if (GRAPHICS_VER(rq->i915) >= 8)
                 cmd++;
         *cs++ = cmd;
-        *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
-        *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+        *cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
+        *cs++ = tl->hwsp_offset + slot * sizeof(u32);
         *cs++ = 0;
 
         intel_ring_advance(rq, cs);
@@ -72,7 +87,7 @@ static struct i915_vma *create_empty_batch(struct intel_context *ce)
         if (IS_ERR(obj))
                 return ERR_CAST(obj);
 
-        cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto err_put;
@@ -120,18 +135,22 @@ static int perf_mi_bb_start(void *arg)
         struct intel_gt *gt = arg;
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
+        intel_wakeref_t wakeref;
         int err = 0;
 
-        if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+        if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
                 return 0;
 
-        perf_begin(gt);
+        wakeref = perf_begin(gt);
         for_each_engine(engine, gt, id) {
                 struct intel_context *ce = engine->kernel_context;
                 struct i915_vma *batch;
                 u32 cycles[COUNT];
                 int i;
 
+                if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
+                        continue;
+
                 intel_engine_pm_get(engine);
 
                 batch = create_empty_batch(ce);
@@ -162,7 +181,7 @@ static int perf_mi_bb_start(void *arg)
                                 goto out;
 
                         err = rq->engine->emit_bb_start(rq,
-                                                        batch->node.start, 8,
+                                                        i915_vma_offset(batch), 8,
                                                         0);
                         if (err)
                                 goto out;
@@ -191,7 +210,7 @@ out:
                 pr_info("%s: MI_BB_START cycles: %u\n",
                         engine->name, trifilter(cycles));
         }
-        if (perf_end(gt))
+        if (perf_end(gt, wakeref))
                 err = -EIO;
 
         return err;
@@ -208,7 +227,7 @@ static struct i915_vma *create_nop_batch(struct intel_context *ce)
         if (IS_ERR(obj))
                 return ERR_CAST(obj);
 
-        cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
         if (IS_ERR(cs)) {
                 err = PTR_ERR(cs);
                 goto err_put;
@@ -244,18 +263,22 @@ static int perf_mi_noop(void *arg)
         struct intel_gt *gt = arg;
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
+        intel_wakeref_t wakeref;
         int err = 0;
 
-        if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+        if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
                 return 0;
 
-        perf_begin(gt);
+        wakeref = perf_begin(gt);
         for_each_engine(engine, gt, id) {
                 struct intel_context *ce = engine->kernel_context;
                 struct i915_vma *base, *nop;
                 u32 cycles[COUNT];
                 int i;
 
+                if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
+                        continue;
+
                 intel_engine_pm_get(engine);
 
                 base = create_empty_batch(ce);
@@ -302,7 +325,7 @@ static int perf_mi_noop(void *arg)
                                 goto out;
 
                         err = rq->engine->emit_bb_start(rq,
-                                                        base->node.start, 8,
+                                                        i915_vma_offset(base), 8,
                                                         0);
                         if (err)
                                 goto out;
@@ -312,8 +335,8 @@ static int perf_mi_noop(void *arg)
                                 goto out;
 
                         err = rq->engine->emit_bb_start(rq,
-                                                        nop->node.start,
-                                                        nop->node.size,
+                                                        i915_vma_offset(nop),
+                                                        i915_vma_size(nop),
                                                         0);
                         if (err)
                                 goto out;
@@ -345,7 +368,7 @@ out:
                 pr_info("%s: 16K MI_NOOP cycles: %u\n",
                         engine->name, trifilter(cycles));
         }
-        if (perf_end(gt))
+        if (perf_end(gt, wakeref))
                 err = -EIO;
 
         return err;
@@ -358,10 +381,10 @@ int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
                 SUBTEST(perf_mi_noop),
         };
 
-        if (intel_gt_is_wedged(&i915->gt))
+        if (intel_gt_is_wedged(to_gt(i915)))
                 return 0;
 
-        return intel_gt_live_subtests(tests, &i915->gt);
+        return intel_gt_live_subtests(tests, to_gt(i915));
 }
 
 static int intel_mmio_bases_check(void *arg)
@@ -373,34 +396,34 @@ static int intel_mmio_bases_check(void *arg)
                 u8 prev = U8_MAX;
 
                 for (j = 0; j < MAX_MMIO_BASES; j++) {
-                        u8 gen = info->mmio_bases[j].gen;
+                        u8 ver = info->mmio_bases[j].graphics_ver;
                         u32 base = info->mmio_bases[j].base;
 
-                        if (gen >= prev) {
-                                pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
+                        if (ver >= prev) {
+                                pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
                                        __func__,
                                        intel_engine_class_repr(info->class),
                                        info->class, info->instance,
-                                       prev, gen);
+                                       prev, ver);
                                 return -EINVAL;
                         }
 
-                        if (gen == 0)
+                        if (ver == 0)
                                 break;
 
                         if (!base) {
-                                pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
+                                pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
                                        __func__,
                                        intel_engine_class_repr(info->class),
                                        info->class, info->instance,
-                                       base, gen, j);
+                                       base, ver, j);
                                 return -EINVAL;
                         }
 
-                        prev = gen;
+                        prev = ver;
                 }
 
-                pr_debug("%s: min gen supported for %s%d is %d\n",
+                pr_debug("%s: min graphics version supported for %s%d is %u\n",
                          __func__,
                          intel_engine_class_repr(info->class),
                          info->instance,
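The interface change that ripples through both perf tests is that perf_begin() now returns the intel_wakeref_t it obtained from intel_gt_pm_get(), and perf_end() hands it back to intel_gt_pm_put(). A minimal sketch of the resulting calling pattern, assuming this file's context; run_measurements() is a hypothetical stand-in for the per-engine loops in perf_mi_bb_start()/perf_mi_noop():

/*
 * Sketch only: the perf_begin()/perf_end() wakeref threading after this
 * change.  run_measurements() is hypothetical and stands in for the
 * per-engine measurement loops.
 */
static int perf_selftest_skeleton(struct intel_gt *gt)
{
        intel_wakeref_t wakeref;
        int err;

        wakeref = perf_begin(gt);       /* GT wakeref held, gpufreq boosted to max */

        err = run_measurements(gt);     /* hypothetical measurement body */

        if (perf_end(gt, wakeref))      /* drops the waitboost and releases the wakeref */
                err = -EIO;

        return err;
}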

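The version gates also shift: the early bail-out is relaxed from GRAPHICS_VER < 7 to < 4, and a new per-engine check skips everything except RCS0 on graphics versions 4-6. A hedged helper expressing that predicate in one place; the helper name is an illustration, not something added by the diff:

/*
 * Illustrative helper, not part of the diff: the condition the new checks
 * encode.  Below graphics version 4 there is no CS_TIMESTAMP to sample at
 * all; on versions 4-6 only the render engine (RCS0) is measured.
 */
static bool can_sample_cs_timestamp(const struct intel_engine_cs *engine)
{
        if (GRAPHICS_VER(engine->i915) < 4)
                return false;

        return GRAPHICS_VER(engine->i915) >= 7 || engine->id == RCS0;
}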