Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 623
1 file changed, 602 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..6014e8dfcbb1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -23,11 +23,13 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
@@ -51,6 +53,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915)
return count;
}
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -58,7 +65,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
- request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
@@ -76,7 +83,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -145,7 +152,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- request = mock_request(i915->engine[RCS0]->kernel_context, T);
+ request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
@@ -420,7 +427,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS0],
+ .engine = rcs0(i915),
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -809,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(batch->vm->gt);
+ __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
i915_gem_object_unpin_map(batch->obj);
+ intel_gt_chipset_flush(batch->vm->gt);
+
return 0;
}
@@ -858,13 +867,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
@@ -872,6 +874,13 @@ static int live_all_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
@@ -986,13 +995,6 @@ static int live_sequential_engines(void *arg)
}
}
- err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
- 0);
- GEM_BUG_ON(err);
- request[idx]->batch = batch;
-
i915_vma_lock(batch);
err = i915_request_await_object(request[idx],
batch->obj, false);
@@ -1001,6 +1003,13 @@ static int live_sequential_engines(void *arg)
i915_vma_unlock(batch);
GEM_BUG_ON(err);
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
i915_request_get(request[idx]);
i915_request_add(request[idx]);
@@ -1053,9 +1062,12 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(engine->gt);
+ __i915_gem_object_flush_map(request[idx]->batch->obj,
+ 0, sizeof(*cmd));
i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+ intel_gt_chipset_flush(engine->gt);
}
i915_vma_put(request[idx]->batch);
@@ -1233,7 +1245,7 @@ static int live_parallel_engines(void *arg)
struct igt_live_test t;
unsigned int idx;
- snprintf(name, sizeof(name), "%pS", fn);
+ snprintf(name, sizeof(name), "%ps", *fn);
err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
@@ -1470,3 +1482,572 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
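+/*
+ * Flush the context: emit a kernel request on the same engine that
+ * awaits the last request on ce's timeline, wait for it to complete,
+ * then flush submission until the engine is idle.
+ */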
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ while (!err && !intel_engine_is_idle(ce->engine))
+ intel_engine_flush_submission(ce->engine);
+
+ return err;
+}
+
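+/* Results gathered per engine for one run of a perf subtest. */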
+struct perf_stats {
+ struct intel_engine_cs *engine;
+ unsigned long count;
+ ktime_t time;
+ ktime_t busy;
+ u64 runtime;
+};
+
+struct perf_series {
+ struct drm_i915_private *i915;
+ unsigned int nengines;
+ struct intel_context *ce[];
+};
+
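+/*
+ * Cycle through the engines, synchronously waiting for each request
+ * to complete before submitting the next.
+ */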
+static int s_sync0(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
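+/*
+ * As s_sync0, but keep one request in flight: wait upon the previous
+ * request only after submitting its successor.
+ */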
+static int s_sync1(void *arg)
+{
+ struct perf_series *ps = arg;
+ struct i915_request *prev = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ return err;
+}
+
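+/* Submit requests round-robin as fast as possible, never waiting. */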
+static int s_many(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return 0;
+}
+
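+/*
+ * Drive every engine from a single thread using each of the modes
+ * above, reporting busyness, context runtime and walltime per engine.
+ */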
+static int perf_series_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ s_sync0,
+ s_sync1,
+ s_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct perf_stats *stats;
+ struct perf_series *ps;
+ unsigned int idx;
+ int err = 0;
+
+ stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+ if (!ps) {
+ kfree(stats);
+ return -ENOMEM;
+ }
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ ps->i915 = i915;
+ ps->nengines = nengines;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ ps->ce[idx++] = ce;
+ }
+ GEM_BUG_ON(idx != ps->nengines);
+
+ for (fn = func; *fn && !err; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p =
+ memset(&stats[idx], 0, sizeof(stats[idx]));
+ struct intel_context *ce = ps->ce[idx];
+
+ p->engine = ps->ce[idx]->engine;
+ intel_engine_pm_get(p->engine);
+
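+ /* +1 so a zero busy-time still marks busy-stats as sampled */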
+ if (intel_engine_supports_stats(p->engine))
+ p->busy = intel_engine_get_busy_time(p->engine) + 1;
+ p->runtime = -intel_context_get_total_runtime_ns(ce);
+ p->time = ktime_get();
+ }
+
+ err = (*fn)(ps);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p = &stats[idx];
+ struct intel_context *ce = ps->ce[idx];
+ int integer, decimal;
+ u64 busy, dt;
+
+ p->time = ktime_sub(ktime_get(), p->time);
+ if (p->busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine),
+ p->busy - 1);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime += intel_context_get_total_runtime_ns(ce);
+ intel_engine_pm_put(p->engine);
+
+ busy = 100 * ktime_to_ns(p->busy);
+ dt = ktime_to_ns(p->time);
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, p->engine->name, ce->timeline->seqno,
+ integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ }
+ }
+
+out:
+ for (idx = 0; idx < nengines; idx++) {
+ if (IS_ERR_OR_NULL(ps->ce[idx]))
+ break;
+
+ intel_context_unpin(ps->ce[idx]);
+ intel_context_put(ps->ce[idx]);
+ }
+ kfree(ps);
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(stats);
+ return err;
+}
+
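+/*
+ * Per-engine thread: submit a request and synchronously wait for it
+ * to complete before submitting the next.
+ */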
+static int p_sync0(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
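+/*
+ * Per-engine thread: keep a single request in flight, waiting on the
+ * previous request only after its successor has been submitted.
+ */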
+static int p_sync1(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct i915_request *prev = NULL;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ p->time = ktime_get();
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
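+/* Per-engine thread: submit requests flat out, without waiting. */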
+static int p_many(void *arg)
+{
+ struct perf_stats *p = arg;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+ bool busy;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
+ busy = false;
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine);
+ busy = true;
+ }
+
+ count = 0;
+ p->time = ktime_get();
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ p->time = ktime_sub(ktime_get(), p->time);
+
+ if (busy) {
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+ p->busy);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ return err;
+}
+
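+/*
+ * Run each mode above on all engines concurrently, one kthread per
+ * engine, to measure aggregate request throughput.
+ */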
+static int perf_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ p_sync0,
+ p_sync1,
+ p_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct {
+ struct perf_stats p;
+ struct task_struct *tsk;
+ } *engines;
+ int err = 0;
+
+ engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ cpu_latency_qos_add_request(&qos, 0);
+
+ for (fn = func; *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ intel_engine_pm_get(engine);
+
+ memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+ engines[idx].p.engine = engine;
+
+ engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+ "igt:%s", engine->name);
+ if (IS_ERR(engines[idx].tsk)) {
+ err = PTR_ERR(engines[idx].tsk);
+ intel_engine_pm_put(engine);
+ break;
+ }
+ get_task_struct(engines[idx++].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(engines[idx].tsk))
+ break;
+
+ status = kthread_stop(engines[idx].tsk);
+ if (status && !err)
+ err = status;
+
+ intel_engine_pm_put(engine);
+ put_task_struct(engines[idx++].tsk);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct perf_stats *p = &engines[idx].p;
+ u64 busy = 100 * ktime_to_ns(p->busy);
+ u64 dt = ktime_to_ns(p->time);
+ int integer, decimal;
+
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ GEM_BUG_ON(engine != p->engine);
+ pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, engine->name, p->count, integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ idx++;
+ }
+ }
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(engines);
+ return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_series_engines),
+ SUBTEST(perf_parallel_engines),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}