Diffstat (limited to 'drivers/gpu/drm/i915/selftests/igt_spinner.c')
| -rw-r--r-- | drivers/gpu/drm/i915/selftests/igt_spinner.c | 191 |
1 file changed, 114 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e35ba5f9e73f..820364171ebe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,16 +3,17 @@
  *
  * Copyright © 2018 Intel Corporation
  */
 
-#include "gt/intel_gt.h"
+#include "gem/i915_gem_internal.h"
 #include "gem/selftests/igt_gem_utils.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "i915_wait_util.h"
 
 #include "igt_spinner.h"
 
 int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 {
-	unsigned int mode;
-	void *vaddr;
 	int err;
 
 	memset(spin, 0, sizeof(*spin));
@@ -23,6 +24,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 		err = PTR_ERR(spin->hws);
 		goto err;
 	}
+	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
 
 	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(spin->obj)) {
@@ -30,59 +32,92 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = i915_coherent_map_type(gt->i915);
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
 	return 0;
 
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
 err_hws:
 	i915_gem_object_put(spin->hws);
 err:
 	return err;
 }
 
-static unsigned int seqno_offset(u64 fence)
+static void *igt_spinner_pin_obj(struct intel_context *ce,
+				 struct i915_gem_ww_ctx *ww,
+				 struct drm_i915_gem_object *obj,
+				 unsigned int mode, struct i915_vma **vma)
 {
-	return offset_in_page(sizeof(u32) * fence);
+	void *vaddr;
+	int ret;
+
+	*vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(*vma))
+		return ERR_CAST(*vma);
+
+	ret = i915_gem_object_lock(obj, ww);
+	if (ret)
+		return ERR_PTR(ret);
+
+	vaddr = i915_gem_object_pin_map(obj, mode);
+
+	if (!ww)
+		i915_gem_object_unlock(obj);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	if (ww)
+		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
+	else
+		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);
+
+	if (ret) {
+		i915_gem_object_unpin_map(obj);
+		return ERR_PTR(ret);
+	}
+
+	return vaddr;
 }
 
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
+int igt_spinner_pin(struct igt_spinner *spin,
+		    struct intel_context *ce,
+		    struct i915_gem_ww_ctx *ww)
 {
-	return hws->node.start + seqno_offset(rq->fence.context);
+	void *vaddr;
+
+	if (spin->ce && WARN_ON(spin->ce != ce))
+		return -ENODEV;
+	spin->ce = ce;
+
+	if (!spin->seqno) {
+		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+	}
+
+	if (!spin->batch) {
+		unsigned int mode;
+
+		mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
+		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		spin->batch = vaddr;
+	}
+
+	return 0;
 }
 
-static int move_to_active(struct i915_vma *vma,
-			  struct i915_request *rq,
-			  unsigned int flags)
+static unsigned int seqno_offset(u64 fence)
 {
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj,
-					flags & EXEC_OBJECT_WRITE);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
+	return offset_in_page(sizeof(u32) * fence);
+}
 
-	return err;
+static u64 hws_address(const struct i915_vma *hws,
+		       const struct i915_request *rq)
+{
+	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
 }
 
 struct i915_request *
@@ -102,47 +137,38 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	if (!intel_engine_can_store_dword(ce->engine))
 		return ERR_PTR(-ENODEV);
 
-	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
-	if (IS_ERR(vma))
-		return ERR_CAST(vma);
-
-	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
-	if (IS_ERR(hws))
-		return ERR_CAST(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return ERR_PTR(err);
+	if (!spin->batch) {
+		err = igt_spinner_pin(spin, ce, NULL);
+		if (err)
+			return ERR_PTR(err);
+	}
 
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
+	hws = spin->hws_vma;
+	vma = spin->batch_vma;
 
 	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto unpin_hws;
-	}
+	if (IS_ERR(rq))
+		return ERR_CAST(rq);
 
-	err = move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
 		goto cancel_rq;
 
-	err = move_to_active(hws, rq, 0);
+	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
 	if (err)
 		goto cancel_rq;
 
 	batch = spin->batch;
 
-	if (INTEL_GEN(rq->i915) >= 8) {
+	if (GRAPHICS_VER(rq->i915) >= 8) {
 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = upper_32_bits(hws_address(hws, rq));
-	} else if (INTEL_GEN(rq->i915) >= 6) {
+	} else if (GRAPHICS_VER(rq->i915) >= 6) {
 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
 		*batch++ = 0;
 		*batch++ = hws_address(hws, rq);
-	} else if (INTEL_GEN(rq->i915) >= 4) {
+	} else if (GRAPHICS_VER(rq->i915) >= 4) {
 		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*batch++ = 0;
 		*batch++ = hws_address(hws, rq);
@@ -154,16 +180,19 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
 	*batch++ = arbitration_command;
 
-	if (INTEL_GEN(rq->i915) >= 8)
+	memset32(batch, MI_NOOP, 128);
+	batch += 128;
+
+	if (GRAPHICS_VER(rq->i915) >= 8)
 		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
 	else if (IS_HASWELL(rq->i915))
 		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
-	else if (INTEL_GEN(rq->i915) >= 6)
+	else if (GRAPHICS_VER(rq->i915) >= 6)
 		*batch++ = MI_BATCH_BUFFER_START;
 	else
 		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
+	*batch++ = lower_32_bits(i915_vma_offset(vma));
+	*batch++ = upper_32_bits(i915_vma_offset(vma));
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
 	intel_gt_chipset_flush(engine->gt);
@@ -176,19 +205,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	}
 
 	flags = 0;
-	if (INTEL_GEN(rq->i915) <= 5)
+	if (GRAPHICS_VER(rq->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
-	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+	err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
 cancel_rq:
 	if (err) {
 		i915_request_set_error_once(rq, err);
 		i915_request_add(rq);
 	}
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
 	return err ? ERR_PTR(err) : rq;
 }
 
@@ -202,6 +227,9 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
 
 void igt_spinner_end(struct igt_spinner *spin)
 {
+	if (!spin->batch)
+		return;
+
 	*spin->batch = MI_BATCH_BUFFER_END;
 	intel_gt_chipset_flush(spin->gt);
 }
@@ -210,19 +238,28 @@ void igt_spinner_fini(struct igt_spinner *spin)
 {
 	igt_spinner_end(spin);
 
-	i915_gem_object_unpin_map(spin->obj);
+	if (spin->batch) {
+		i915_vma_unpin(spin->batch_vma);
+		i915_gem_object_unpin_map(spin->obj);
+	}
 	i915_gem_object_put(spin->obj);
 
-	i915_gem_object_unpin_map(spin->hws);
+	if (spin->seqno) {
+		i915_vma_unpin(spin->hws_vma);
+		i915_gem_object_unpin_map(spin->hws);
+	}
 	i915_gem_object_put(spin->hws);
 }
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
+	if (i915_request_is_ready(rq))
+		intel_engine_flush_submission(rq->engine);
+
 	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
 					       rq->fence.seqno),
-			     10) &&
+			     100) &&
 		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
 					    rq->fence.seqno),
-			  1000));
+			  50));
 }
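Usage sketch (not part of the diff above): a minimal example of how a selftest might drive the spinner through the igt_spinner_pin() helper shown here. The function name __example_spin_on_engine is hypothetical and engine iteration, locking and reporting details are elided; the igt_spinner_*, intel_context_* and i915_request_* calls follow their declarations in the i915 selftest and GT headers.

/*
 * Hypothetical selftest fragment, for illustration only: start a spinner
 * on one engine, wait for it to begin executing, then terminate it.
 */
static int __example_spin_on_engine(struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct intel_context *ce;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		return err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_spin;
	}

	/* Pin hws/batch up front; igt_spinner_create_request() otherwise pins lazily. */
	err = igt_spinner_pin(&spin, ce, NULL);
	if (err)
		goto out_ce;

	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	/* The spinner batch writes its seqno to the hws page once it runs. */
	if (!igt_wait_for_spinner(&spin, rq))
		err = -ETIME;

	/* Replace the infinite loop with MI_BATCH_BUFFER_END and let rq retire. */
	igt_spinner_end(&spin);
	if (i915_request_wait(rq, 0, HZ) < 0)
		err = -ETIME;
	i915_request_put(rq);

out_ce:
	intel_context_put(ce);
out_spin:
	igt_spinner_fini(&spin);
	return err;
}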
