Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_execlists.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/selftest_execlists.c	216
1 file changed, 106 insertions, 110 deletions
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index e10da897e07a..a06b397b6d42 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -5,11 +5,15 @@
 
 #include <linux/prime_numbers.h>
 
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_internal.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_reset.h"
 #include "gt/selftest_engine_heartbeat.h"
 
+#include "i915_jiffies.h"
 #include "i915_selftest.h"
 #include "selftests/i915_random.h"
 #include "selftests/igt_flush_test.h"
@@ -52,7 +56,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 		if (i915_request_completed(rq)) /* that was quick! */
 			return 0;
 
-		/* Wait until the HW has acknowleged the submission (or err) */
+		/* Wait until the HW has acknowledged the submission (or err) */
 		intel_engine_flush_submission(engine);
 		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
 			return 0;
@@ -84,8 +88,6 @@ static int wait_for_reset(struct intel_engine_cs *engine,
 			break;
 	} while (time_before(jiffies, timeout));
 
-	flush_scheduled_work();
-
 	if (rq->fence.error != -EIO) {
 		pr_err("%s: hanging request %llx:%lld not reset\n",
 		       engine->name,
@@ -94,7 +96,7 @@ static int wait_for_reset(struct intel_engine_cs *engine,
 		return -EINVAL;
 	}
 
-	/* Give the request a jiffie to complete after flushing the worker */
+	/* Give the request a jiffy to complete after flushing the worker */
 	if (i915_request_wait(rq, 0,
 			      max(0l, (long)(timeout - jiffies)) + 1) < 0) {
 		pr_err("%s: hanging request %llx:%lld did not complete\n",
@@ -1199,7 +1201,7 @@ static int live_timeslice_rewind(void *arg)
 	ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
 	while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
 		/* Wait for the timeslice to kick in */
-		del_timer(&engine->execlists.timer);
+		timer_delete(&engine->execlists.timer);
 		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 		intel_engine_flush_submission(engine);
 	}
@@ -1531,8 +1533,8 @@ static int live_busywait_preempt(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	enum intel_engine_id id;
-	int err = -ENOMEM;
 	u32 *map;
+	int err;
 
 	/*
 	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1540,13 +1542,17 @@ static int live_busywait_preempt(void *arg)
 	 */
 
 	ctx_hi = kernel_context(gt->i915, NULL);
-	if (!ctx_hi)
-		return -ENOMEM;
+	if (IS_ERR(ctx_hi))
+		return PTR_ERR(ctx_hi);
+
 	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(gt->i915, NULL);
-	if (!ctx_lo)
+	if (IS_ERR(ctx_lo)) {
+		err = PTR_ERR(ctx_lo);
 		goto err_ctx_hi;
+	}
+
 	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -1735,15 +1741,9 @@ static int live_preempt(void *arg)
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
-	if (igt_spinner_init(&spin_hi, gt))
-		return -ENOMEM;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		goto err_spin_hi;
-
 	ctx_hi = kernel_context(gt->i915, NULL);
 	if (!ctx_hi)
-		goto err_spin_lo;
+		return -ENOMEM;
 	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(gt->i915, NULL);
@@ -1751,6 +1751,12 @@ static int live_preempt(void *arg)
 		goto err_ctx_hi;
 	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
+	if (igt_spinner_init(&spin_hi, gt))
+		goto err_ctx_lo;
+
+	if (igt_spinner_init(&spin_lo, gt))
+		goto err_spin_hi;
+
 	for_each_engine(engine, gt, id) {
 		struct igt_live_test t;
 		struct i915_request *rq;
@@ -1760,14 +1766,14 @@ static int live_preempt(void *arg)
 		if (igt_live_test_begin(&t, gt->i915,
 					__func__, engine->name)) {
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
 					    MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		i915_request_add(rq);
@@ -1776,7 +1782,7 @@ static int live_preempt(void *arg)
 			GEM_TRACE_DUMP();
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
@@ -1784,7 +1790,7 @@ static int live_preempt(void *arg)
 		if (IS_ERR(rq)) {
 			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		i915_request_add(rq);
@@ -1793,7 +1799,7 @@ static int live_preempt(void *arg)
 			GEM_TRACE_DUMP();
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		igt_spinner_end(&spin_hi);
@@ -1801,19 +1807,19 @@ static int live_preempt(void *arg)
 
 		if (igt_live_test_end(&t)) {
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 	}
 
 	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
 err_spin_lo:
 	igt_spinner_fini(&spin_lo);
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
 	return err;
 }
 
@@ -1827,20 +1833,20 @@ static int live_late_preempt(void *arg)
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
-	if (igt_spinner_init(&spin_hi, gt))
-		return -ENOMEM;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		goto err_spin_hi;
-
 	ctx_hi = kernel_context(gt->i915, NULL);
 	if (!ctx_hi)
-		goto err_spin_lo;
+		return -ENOMEM;
 
 	ctx_lo = kernel_context(gt->i915, NULL);
 	if (!ctx_lo)
 		goto err_ctx_hi;
 
+	if (igt_spinner_init(&spin_hi, gt))
+		goto err_ctx_lo;
+
+	if (igt_spinner_init(&spin_lo, gt))
+		goto err_spin_hi;
+
 	/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
 	ctx_lo->sched.priority = 1;
 
@@ -1853,14 +1859,14 @@ static int live_late_preempt(void *arg)
 		if (igt_live_test_begin(&t, gt->i915,
 					__func__, engine->name)) {
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
 					    MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		i915_request_add(rq);
@@ -1874,7 +1880,7 @@ static int live_late_preempt(void *arg)
 		if (IS_ERR(rq)) {
 			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		i915_request_add(rq);
@@ -1897,19 +1903,19 @@ static int live_late_preempt(void *arg)
 
 		if (igt_live_test_end(&t)) {
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 	}
 
 	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
 err_spin_lo:
 	igt_spinner_fini(&spin_lo);
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
 	return err;
 
 err_wedged:
@@ -1917,7 +1923,7 @@ err_wedged:
 	igt_spinner_end(&spin_lo);
 	intel_gt_set_wedged(gt);
 	err = -EIO;
-	goto err_ctx_lo;
+	goto err_spin_lo;
 }
 
 struct preempt_client {
@@ -2076,7 +2082,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	intel_context_set_banned(rq->context);
+	intel_context_ban(rq->context, rq);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2135,7 +2141,7 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	intel_context_set_banned(rq[1]->context);
+	intel_context_ban(rq[1]->context, rq[1]);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2218,7 +2224,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	intel_context_set_banned(rq[2]->context);
+	intel_context_ban(rq[2]->context, rq[2]);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -2233,7 +2239,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	if (rq[1]->fence.error != 0) {
+	/*
+	 * The behavior between having semaphores and not is different. With
+	 * semaphores the subsequent request is on the hardware and not cancelled
+	 * while without the request is held in the driver and cancelled.
+	 */
+	if (intel_engine_has_semaphores(rq[1]->engine) &&
+	    rq[1]->fence.error != 0) {
 		pr_err("Normal inflight1 request did not complete\n");
 		err = -EINVAL;
 		goto out;
 	}
@@ -2281,7 +2293,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	intel_context_set_banned(rq->context);
+	intel_context_ban(rq->context, rq);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
@@ -2348,7 +2360,7 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
 	/* force preempt reset [failure] */
 	while (!engine->execlists.pending[0])
 		intel_engine_flush_submission(engine);
-	del_timer_sync(&engine->execlists.preempt);
+	timer_delete_sync(&engine->execlists.preempt);
 	intel_engine_flush_submission(engine);
 
 	cancel_reset_timeout(engine);
@@ -2732,11 +2744,11 @@ static int create_gang(struct intel_engine_cs *engine,
 		MI_SEMAPHORE_POLL |
 		MI_SEMAPHORE_SAD_EQ_SDD;
 	*cs++ = 0;
-	*cs++ = lower_32_bits(vma->node.start);
-	*cs++ = upper_32_bits(vma->node.start);
+	*cs++ = lower_32_bits(i915_vma_offset(vma));
+	*cs++ = upper_32_bits(i915_vma_offset(vma));
 
 	if (*prev) {
-		u64 offset = (*prev)->batch->node.start;
+		u64 offset = i915_vma_offset((*prev)->batch);
 
 		/* Terminate the spinner in the next lower priority batch. */
 		*cs++ = MI_STORE_DWORD_IMM_GEN4;
@@ -2758,15 +2770,11 @@ static int create_gang(struct intel_engine_cs *engine,
 	rq->batch = i915_vma_get(vma);
 	i915_request_get(rq);
 
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (!err)
-		err = i915_vma_move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
-						vma->node.start,
+						i915_vma_offset(vma),
 						PAGE_SIZE, 0);
-	i915_vma_unlock(vma);
 	i915_request_add(rq);
 	if (err)
 		goto err_rq;
@@ -3092,7 +3100,7 @@ create_gpr_user(struct intel_engine_cs *engine,
 		*cs++ = MI_MATH_ADD;
 		*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
 
-		addr = result->node.start + offset + i * sizeof(*cs);
+		addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
 		*cs++ = MI_STORE_REGISTER_MEM_GEN8;
 		*cs++ = CS_GPR(engine, 2 * i);
 		*cs++ = lower_32_bits(addr);
@@ -3102,8 +3110,8 @@ create_gpr_user(struct intel_engine_cs *engine,
 			MI_SEMAPHORE_POLL |
 			MI_SEMAPHORE_SAD_GTE_SDD;
 		*cs++ = i;
-		*cs++ = lower_32_bits(result->node.start);
-		*cs++ = upper_32_bits(result->node.start);
+		*cs++ = lower_32_bits(i915_vma_offset(result));
+		*cs++ = upper_32_bits(i915_vma_offset(result));
 	}
 
 	*cs++ = MI_BATCH_BUFFER_END;
@@ -3174,20 +3182,14 @@ create_gpr_client(struct intel_engine_cs *engine,
 		goto out_batch;
 	}
 
-	i915_vma_lock(vma);
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (!err)
-		err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 
 	i915_vma_lock(batch);
 	if (!err)
-		err = i915_request_await_object(rq, batch->obj, false);
-	if (!err)
 		err = i915_vma_move_to_active(batch, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
-						batch->node.start,
+						i915_vma_offset(batch),
 						PAGE_SIZE, 0);
 	i915_vma_unlock(batch);
 	i915_vma_unpin(batch);
@@ -3381,12 +3383,9 @@ static int live_preempt_timeout(void *arg)
 	if (!intel_has_reset_engine(gt))
 		return 0;
 
-	if (igt_spinner_init(&spin_lo, gt))
-		return -ENOMEM;
-
 	ctx_hi = kernel_context(gt->i915, NULL);
 	if (!ctx_hi)
-		goto err_spin_lo;
+		return -ENOMEM;
 	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(gt->i915, NULL);
@@ -3394,6 +3393,9 @@ static int live_preempt_timeout(void *arg)
 		goto err_ctx_hi;
 	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
+	if (igt_spinner_init(&spin_lo, gt))
+		goto err_ctx_lo;
+
 	for_each_engine(engine, gt, id) {
 		unsigned long saved_timeout;
 		struct i915_request *rq;
@@ -3405,21 +3407,21 @@ static int live_preempt_timeout(void *arg)
 					    MI_NOOP); /* preemption disabled */
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		i915_request_add(rq);
 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		rq = igt_request_alloc(ctx_hi, engine);
 		if (IS_ERR(rq)) {
 			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		/* Flush the previous CS ack before changing timeouts */
@@ -3427,7 +3429,7 @@ static int live_preempt_timeout(void *arg)
 			cpu_relax();
 
 		saved_timeout = engine->props.preempt_timeout_ms;
-		engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
+		engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
 
 		i915_request_get(rq);
 		i915_request_add(rq);
@@ -3439,7 +3441,7 @@ static int live_preempt_timeout(void *arg)
 			intel_gt_set_wedged(gt);
 			i915_request_put(rq);
 			err = -ETIME;
-			goto err_ctx_lo;
+			goto err_spin_lo;
 		}
 
 		igt_spinner_end(&spin_lo);
@@ -3447,12 +3449,12 @@ static int live_preempt_timeout(void *arg)
 	}
 
 	err = 0;
+err_spin_lo:
+	igt_spinner_fini(&spin_lo);
 err_ctx_lo:
 	kernel_context_close(ctx_lo);
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
-err_spin_lo:
-	igt_spinner_fini(&spin_lo);
 	return err;
 }
 
@@ -3468,12 +3470,14 @@ static int random_priority(struct rnd_state *rnd)
 
 struct preempt_smoke {
 	struct intel_gt *gt;
+	struct kthread_work work;
 	struct i915_gem_context **contexts;
 	struct intel_engine_cs *engine;
 	struct drm_i915_gem_object *batch;
 	unsigned int ncontext;
 	struct rnd_state prng;
 	unsigned long count;
+	int result;
 };
 
 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -3513,15 +3517,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	}
 
 	if (vma) {
-		i915_vma_lock(vma);
-		err = i915_request_await_object(rq, vma->obj, false);
-		if (!err)
-			err = i915_vma_move_to_active(vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 		if (!err)
 			err = rq->engine->emit_bb_start(rq,
-							vma->node.start,
+							i915_vma_offset(vma),
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 	}
 
 	i915_request_add(rq);
@@ -3533,34 +3533,31 @@ unpin:
 	return err;
 }
 
-static int smoke_crescendo_thread(void *arg)
+static void smoke_crescendo_work(struct kthread_work *work)
 {
-	struct preempt_smoke *smoke = arg;
+	struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
 
 	count = 0;
 	do {
 		struct i915_gem_context *ctx = smoke_context(smoke);
-		int err;
 
-		err = smoke_submit(smoke,
-				   ctx, count % I915_PRIORITY_MAX,
-				   smoke->batch);
-		if (err)
-			return err;
+		smoke->result = smoke_submit(smoke, ctx,
+					     count % I915_PRIORITY_MAX,
+					     smoke->batch);
 
 		count++;
-	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+	} while (!smoke->result && count < smoke->ncontext &&
+		 !__igt_timeout(end_time, NULL));
 
 	smoke->count = count;
-	return 0;
 }
 
 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 #define BATCH BIT(0)
 {
-	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct kthread_worker *worker[I915_NUM_ENGINES] = {};
 	struct preempt_smoke *arg;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -3571,6 +3568,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 	if (!arg)
 		return -ENOMEM;
 
+	memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
+
 	for_each_engine(engine, smoke->gt, id) {
 		arg[id] = *smoke;
 		arg[id].engine = engine;
@@ -3578,31 +3577,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 			arg[id].batch = NULL;
 		arg[id].count = 0;
 
-		tsk[id] = kthread_run(smoke_crescendo_thread, arg,
-				      "igt/smoke:%d", id);
-		if (IS_ERR(tsk[id])) {
-			err = PTR_ERR(tsk[id]);
+		worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
+		if (IS_ERR(worker[id])) {
+			err = PTR_ERR(worker[id]);
 			break;
 		}
-		get_task_struct(tsk[id]);
-	}
-
-	yield(); /* start all threads before we kthread_stop() */
+
+		kthread_init_work(&arg[id].work, smoke_crescendo_work);
+		kthread_queue_work(worker[id], &arg[id].work);
+	}
 
 	count = 0;
 	for_each_engine(engine, smoke->gt, id) {
-		int status;
-
-		if (IS_ERR_OR_NULL(tsk[id]))
+		if (IS_ERR_OR_NULL(worker[id]))
 			continue;
 
-		status = kthread_stop(tsk[id]);
-		if (status && !err)
-			err = status;
+		kthread_flush_work(&arg[id].work);
+		if (arg[id].result && !err)
+			err = arg[id].result;
 
 		count += arg[id].count;
-		put_task_struct(tsk[id]);
+		kthread_destroy_worker(worker[id]);
 	}
 
 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
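
Note on the smoke_crescendo() conversion above: the per-engine submitters move from bare kthreads (kthread_run()/kthread_stop()) to kthread workers, with the loop body packaged as a kthread_work item and the error code returned through the new preempt_smoke.result field rather than the thread exit status. Below is a minimal self-contained sketch of that pattern, not code from the patch; the demo_* names are invented, and kthread_run_worker() is the helper the diff itself uses (older kernels spell it kthread_create_worker()).

	/*
	 * Sketch only: mirrors the kthread_worker pattern used by the
	 * smoke_crescendo() rework. demo_state/demo_work_fn are made up.
	 */
	#include <linux/err.h>
	#include <linux/kthread.h>

	struct demo_state {
		struct kthread_work work;	/* embedded work item */
		int result;			/* replaces the kthread exit code */
	};

	static void demo_work_fn(struct kthread_work *work)
	{
		/* Recover the containing state from the embedded work item */
		struct demo_state *state = container_of(work, typeof(*state), work);

		state->result = 0;		/* ... do the real job here ... */
	}

	static int demo_run(void)
	{
		struct demo_state state = { .result = -1 };
		struct kthread_worker *worker;

		worker = kthread_run_worker(0, "demo/worker");
		if (IS_ERR(worker))
			return PTR_ERR(worker);

		kthread_init_work(&state.work, demo_work_fn);
		kthread_queue_work(worker, &state.work);

		kthread_flush_work(&state.work);	/* wait for the work to finish */
		kthread_destroy_worker(worker);		/* stop and free the worker */

		return state.result;
	}

Because kthread_flush_work() only waits for the queued item and, unlike kthread_stop(), returns no status, the result has to travel through shared state; that is why the patch adds the result member to struct preempt_smoke.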

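A second recurring change is setup/teardown ordering: each test now creates its kernel contexts before initialising the spinners, and the error labels are reordered to match (goto err_ctx_lo becomes goto err_spin_lo, with the label block inverted), so the unwind path releases resources in exactly the reverse order of acquisition. A generic sketch of that goto-unwind idiom follows; acquire_*()/release_*() are hypothetical helpers, not i915 functions.

	/*
	 * Sketch of the unwind ordering the patch restores: each label
	 * releases one resource, in reverse order of acquisition.
	 */
	int acquire_a(void);		/* hypothetical helpers */
	int acquire_b(void);
	int acquire_c(void);
	void release_a(void);
	void release_b(void);

	static int demo_setup(void)
	{
		int err;

		err = acquire_a();	/* acquired first ... */
		if (err)
			return err;

		err = acquire_b();
		if (err)
			goto err_a;

		err = acquire_c();	/* ... acquired last */
		if (err)
			goto err_b;

		return 0;

	err_b:
		release_b();		/* released first on error */
	err_a:
		release_a();
		return err;
	}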