Diffstat (limited to 'drivers/gpu/drm/i915/selftests/igt_spinner.c')
-rw-r--r--   drivers/gpu/drm/i915/selftests/igt_spinner.c   262
1 file changed, 164 insertions, 98 deletions
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8cd34f6e6859..820364171ebe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -4,59 +4,111 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include "gem/i915_gem_internal.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+
+#include "i915_wait_util.h"
 #include "igt_spinner.h"
 
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 {
-	unsigned int mode;
-	void *vaddr;
 	int err;
 
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
 	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
+	spin->gt = gt;
 
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(spin->hws)) {
 		err = PTR_ERR(spin->hws);
 		goto err;
 	}
+	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
 
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 	if (IS_ERR(spin->obj)) {
 		err = PTR_ERR(spin->obj);
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = i915_coherent_map_type(i915);
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
 	return 0;
 
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
 err_hws:
 	i915_gem_object_put(spin->hws);
 err:
 	return err;
 }
 
+static void *igt_spinner_pin_obj(struct intel_context *ce,
+				 struct i915_gem_ww_ctx *ww,
+				 struct drm_i915_gem_object *obj,
+				 unsigned int mode, struct i915_vma **vma)
+{
+	void *vaddr;
+	int ret;
+
+	*vma = i915_vma_instance(obj, ce->vm, NULL);
+	if (IS_ERR(*vma))
+		return ERR_CAST(*vma);
+
+	ret = i915_gem_object_lock(obj, ww);
+	if (ret)
+		return ERR_PTR(ret);
+
+	vaddr = i915_gem_object_pin_map(obj, mode);
+
+	if (!ww)
+		i915_gem_object_unlock(obj);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	if (ww)
+		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
+	else
+		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);
+
+	if (ret) {
+		i915_gem_object_unpin_map(obj);
+		return ERR_PTR(ret);
+	}
+
+	return vaddr;
+}
+
+int igt_spinner_pin(struct igt_spinner *spin,
+		    struct intel_context *ce,
+		    struct i915_gem_ww_ctx *ww)
+{
+	void *vaddr;
+
+	if (spin->ce && WARN_ON(spin->ce != ce))
+		return -ENODEV;
+	spin->ce = ce;
+
+	if (!spin->seqno) {
+		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+	}
+
+	if (!spin->batch) {
+		unsigned int mode;
+
+		mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
+		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		spin->batch = vaddr;
+	}
+
+	return 0;
+}
+
 static unsigned int seqno_offset(u64 fence)
 {
 	return offset_in_page(sizeof(u32) * fence);
@@ -65,97 +117,104 @@ static unsigned int seqno_offset(u64 fence)
 static u64 hws_address(const struct i915_vma *hws,
 		       const struct i915_request *rq)
 {
-	return hws->node.start + seqno_offset(rq->fence.context);
+	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
 }
 
-static int emit_recurse_batch(struct igt_spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct intel_context *ce,
+			   u32 arbitration_command)
 {
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	struct intel_engine_cs *engine = ce->engine;
+	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
+	unsigned int flags;
 	u32 *batch;
 	int err;
 
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	GEM_BUG_ON(spin->gt != ce->vm->gt);
 
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
+	if (!intel_engine_can_store_dword(ce->engine))
+		return ERR_PTR(-ENODEV);
 
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
+	if (!spin->batch) {
+		err = igt_spinner_pin(spin, ce, NULL);
+		if (err)
+			return ERR_PTR(err);
+	}
 
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
+	hws = spin->hws_vma;
+	vma = spin->batch_vma;
 
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return ERR_CAST(rq);
 
-	err = i915_vma_move_to_active(hws, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
-		goto unpin_hws;
+		goto cancel_rq;
 
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
+	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
+	if (err)
+		goto cancel_rq;
 
 	batch = spin->batch;
 
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
+	if (GRAPHICS_VER(rq->i915) >= 8) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = lower_32_bits(hws_address(hws, rq));
+		*batch++ = upper_32_bits(hws_address(hws, rq));
+	} else if (GRAPHICS_VER(rq->i915) >= 6) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else if (GRAPHICS_VER(rq->i915) >= 4) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else {
+		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+		*batch++ = hws_address(hws, rq);
+	}
 	*batch++ = rq->fence.seqno;
 
 	*batch++ = arbitration_command;
 
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+	memset32(batch, MI_NOOP, 128);
+	batch += 128;
 
-	i915_gem_chipset_flush(spin->i915);
+	if (GRAPHICS_VER(rq->i915) >= 8)
+		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+	else if (IS_HASWELL(rq->i915))
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+	else if (GRAPHICS_VER(rq->i915) >= 6)
+		*batch++ = MI_BATCH_BUFFER_START;
+	else
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*batch++ = lower_32_bits(i915_vma_offset(vma));
+	*batch++ = upper_32_bits(i915_vma_offset(vma));
 
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
+	intel_gt_chipset_flush(engine->gt);
 
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
-			   struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine,
-			   u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
+	if (engine->emit_init_breadcrumb) {
+		err = engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto cancel_rq;
+	}
 
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
+	flags = 0;
+	if (GRAPHICS_VER(rq->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+	err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
-	err = emit_recurse_batch(spin, rq, arbitration_command);
+cancel_rq:
 	if (err) {
+		i915_request_set_error_once(rq, err);
 		i915_request_add(rq);
-		return ERR_PTR(err);
 	}
-
-	return rq;
+	return err ? ERR_PTR(err) : rq;
 }
 
 static u32
@@ -168,32 +227,39 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
 
 void igt_spinner_end(struct igt_spinner *spin)
 {
+	if (!spin->batch)
+		return;
+
 	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
+	intel_gt_chipset_flush(spin->gt);
 }
 
 void igt_spinner_fini(struct igt_spinner *spin)
 {
 	igt_spinner_end(spin);
 
-	i915_gem_object_unpin_map(spin->obj);
+	if (spin->batch) {
+		i915_vma_unpin(spin->batch_vma);
+		i915_gem_object_unpin_map(spin->obj);
+	}
 	i915_gem_object_put(spin->obj);
 
-	i915_gem_object_unpin_map(spin->hws);
+	if (spin->seqno) {
+		i915_vma_unpin(spin->hws_vma);
+		i915_gem_object_unpin_map(spin->hws);
+	}
 	i915_gem_object_put(spin->hws);
 }
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
+	if (i915_request_is_ready(rq))
+		intel_engine_flush_submission(rq->engine);
 
 	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
 					       rq->fence.seqno),
-			     10) &&
+			     100) &&
 		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
 					    rq->fence.seqno),
-			  1000));
+			  50));
 }
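
Usage note (not part of the patch above): the hunks rework the igt_spinner API so that igt_spinner_init() takes a struct intel_gt and igt_spinner_create_request() takes a struct intel_context directly. The sketch below shows how a selftest might drive the reworked interface. It assumes only the igt_spinner_* calls visible in the diff plus common i915 helpers (intel_context_create(), i915_request_get()/i915_request_add()/i915_request_put(), MI_ARB_CHECK); the wrapper name example_spin_on_engine is illustrative, not code from this file.

/*
 * Illustrative sketch: spin on one engine using the reworked igt_spinner
 * API, then check the spinner actually started before tearing it down.
 */
static int example_spin_on_engine(struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct intel_context *ce;
	struct i915_request *rq;
	int err;

	/* igt_spinner_init() now takes the GT, not drm_i915_private */
	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		return err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_spin;
	}

	/* The request is built directly on the intel_context */
	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	/* Wait for the batch to report via the HWS seqno that it is spinning */
	if (!igt_wait_for_spinner(&spin, rq))
		err = -ETIME;

	/* Terminate the spin loop and release everything */
	igt_spinner_end(&spin);
	i915_request_put(rq);
out_ce:
	intel_context_put(ce);
out_spin:
	igt_spinner_fini(&spin);
	return err;
}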
