author     Chris Wilson <chris@chris-wilson.co.uk>  2019-11-01 10:15:28 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-11-01 13:06:35 +0000
commit     f05816cbbcd0aa0af1efdd888ea6964644197e13 (patch)
tree       f0b3dca9aa8c1ca4f458b6afad2f60d976c71d7f /drivers/gpu/drm/i915/selftests
parent     1883e2999f045e1fd6f76a7f30288a5312085289 (diff)
drm/i915/selftests: Spin on all engines simultaneously
Vanshidhar Konda asked for the simplest test "to verify that the kernel can
submit and hardware can execute batch buffers on all the command streamers
in parallel." We have a number of tests in userspace that submit load to
each engine and verify that it is present, but strictly we have no selftest
to prove that the kernel can _simultaneously_ execute on all known engines.
(We have tests to demonstrate that we can submit to HW in parallel, but we
don't insist that they execute in parallel.)

v2: Improve the igt_spinner support for older gen.

Suggested-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101101528.10553-1-chris@chris-wilson.co.uk
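The heart of the new __live_parallel_spin() subtest below is a rendezvous:
every engine gets a non-preemptible spinner, and each per-engine thread only
proceeds once a shared counter shows that all of its peers have started
spinning, which can only happen if the engines really execute in parallel.
The following is a minimal userspace sketch of that synchronization pattern;
pthreads and C11 atomics are stand-ins for the kernel's atomic_t plus
wake_up_var()/wait_var_event_timeout() machinery, and NENGINES and
engine_thread() are hypothetical names, not part of the patch.

/*
 * Illustrative userspace sketch (not the kernel code in the patch): each
 * per-engine thread "starts its spinner", decrements a shared counter, and
 * the last arrival wakes the rest.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NENGINES 4			/* stand-in for num_uabi_engines() */

static atomic_int counter = NENGINES;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_arrived = PTHREAD_COND_INITIALIZER;

static int wake_all(void)		/* analogue of the patch's wake_all() */
{
	if (atomic_fetch_sub(&counter, 1) == 1) {	/* last one to arrive */
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&all_arrived);
		pthread_mutex_unlock(&lock);
		return 1;
	}
	return 0;
}

static int wait_for_all(void)		/* analogue of the patch's wait_for_all() */
{
	if (wake_all())
		return 0;
	pthread_mutex_lock(&lock);
	while (atomic_load(&counter))	/* block until every engine is spinning */
		pthread_cond_wait(&all_arrived, &lock);
	pthread_mutex_unlock(&lock);
	return 0;
}

static void *engine_thread(void *arg)
{
	/* "start the spinner" on this engine, then hold it until all peers run */
	wait_for_all();
	printf("engine %ld saw all peers running\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t tsk[NENGINES];
	long i;

	for (i = 0; i < NENGINES; i++)
		pthread_create(&tsk[i], NULL, engine_thread, (void *)i);
	for (i = 0; i < NENGINES; i++)
		pthread_join(tsk[i], NULL);
	return 0;
}

In the selftest proper the same pattern runs inside the kthreads spawned by
live_parallel_engines(), and the timeout on wait_var_event_timeout() turns a
stuck engine into -ETIME rather than an indefinite hang.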
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  76
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c   40
2 files changed, 109 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 8618a4dc0701..9e6d3159cd80 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,6 +32,7 @@
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
+#include "igt_spinner.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
@@ -1116,12 +1117,85 @@ static int __live_parallel_engineN(void *arg)
return 0;
}
+static bool wake_all(struct drm_i915_private *i915)
+{
+ if (atomic_dec_and_test(&i915->selftest.counter)) {
+ wake_up_var(&i915->selftest.counter);
+ return true;
+ }
+
+ return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+ if (wake_all(i915))
+ return 0;
+
+ if (wait_var_event_timeout(&i915->selftest.counter,
+ !atomic_read(&i915->selftest.counter),
+ i915_selftest.timeout_jiffies))
+ return 0;
+
+ return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ /*
+ * Create a spinner running for eternity on each engine. If a second
+ * spinner is incorrectly placed on the same engine, it will not be
+ * able to start in time.
+ */
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ wake_all(engine->i915);
+ return -ENOMEM;
+ }
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ if (err == -ENODEV)
+ err = 0;
+ wake_all(engine->i915);
+ goto out_spin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin, rq)) {
+ /* Occupy this engine for the whole test */
+ err = wait_for_all(engine->i915);
+ } else {
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ err = -EINVAL;
+ }
+ igt_spinner_end(&spin);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int live_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
__live_parallel_engine1,
__live_parallel_engineN,
+ __live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
@@ -1147,6 +1221,8 @@ static int live_parallel_engines(void *arg)
if (err)
break;
+ atomic_set(&i915->selftest.counter, nengines);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b871da..e8a58fe49c39 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
memset(spin, 0, sizeof(*spin));
spin->gt = gt;
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
+ unsigned int flags;
u32 *batch;
int err;
GEM_BUG_ON(spin->gt != ce->vm->gt);
+ if (!intel_engine_can_store_dword(ce->engine))
+ return ERR_PTR(-ENODEV);
+
vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ } else if (INTEL_GEN(rq->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = hws_address(hws, rq);
+ }
*batch++ = rq->fence.seqno;
*batch++ = arbitration_command;
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ if (INTEL_GEN(rq->i915) >= 8)
+ *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ else if (IS_HASWELL(rq->i915))
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+ else if (INTEL_GEN(rq->i915) >= 6)
+ *batch++ = MI_BATCH_BUFFER_START;
+ else
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
+
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
goto cancel_rq;
}
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ flags = 0;
+ if (INTEL_GEN(rq->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
cancel_rq:
if (err) {