author     Chris Wilson <chris@chris-wilson.co.uk>  2019-04-24 18:48:39 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-04-24 21:01:46 +0100
commit     112ed2d31a46f4704085ad925435b77e62b8abee (patch)
tree       d099b0a6d7989a1f60810bbf82b2966439eb490c /drivers/gpu/drm/i915/selftests
parent     86554f48e511faa58f729cc077b1733179882804 (diff)
drm/i915: Move GraphicsTechnology files under gt/
Start partitioning off the code that talks to the hardware (GT) from the
uapi layers, and move the device-facing code under gt/.

One casualty is s/intel_ringbuffer.h/intel_engine.h/, with the plan to
subdivide that header and body further (and to split out the submission
code from the ringbuffer and logical context handling). This patch aims
to be simple motion so that git can fix up in-flight patches with little
mess.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424174839.7141-1-chris@chris-wilson.co.uk
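For illustration, the header rename shows up in users of the engine API as
a one-line include swap; this representative hunk is lifted verbatim from
the igt_reset.c diff below:

    -#include "../intel_ringbuffer.h"
    +#include "gt/intel_engine.h"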
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c  |    5
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c         |    3
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h       |    3
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_engine_cs.c   |   58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c   | 1919
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c         | 1326
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 1172
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c       |  321
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.h       |   49
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c   |    3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c      |    3
11 files changed, 11 insertions, 4851 deletions
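(Aside: git show accepts a pathspec after --, so this filtered diffstat
should be reproducible from a kernel checkout with:

    git show 112ed2d31a46f4704085ad925435b77e62b8abee --stat -- drivers/gpu/drm/i915/selftests/
)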
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index e1cb22f03e8e..6f52ca881173 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -24,8 +24,9 @@
 
 #include <linux/prime_numbers.h>
 
-#include "../i915_reset.h"
-#include "../i915_selftest.h"
+#include "gt/intel_reset.h"
+#include "i915_selftest.h"
+
 #include "i915_random.h"
 #include "igt_flush_test.h"
 #include "igt_live_test.h"
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 208a966da8ca..4f31b137c428 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -6,8 +6,9 @@
 
 #include "igt_reset.h"
 
+#include "gt/intel_engine.h"
+
 #include "../i915_drv.h"
-#include "../intel_ringbuffer.h"
 
 void igt_global_reset_lock(struct drm_i915_private *i915)
 {
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 391777c76dc7..d312e7cdab68 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -9,9 +9,10 @@
 
 #include "../i915_selftest.h"
 
+#include "gt/intel_engine.h"
+
 #include "../i915_drv.h"
 #include "../i915_request.h"
-#include "../intel_ringbuffer.h"
 #include "../i915_gem_context.h"
 
 struct igt_spinner {
diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
deleted file mode 100644
index cfaa6b296835..000000000000
--- a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#include "../i915_selftest.h"
-
-static int intel_mmio_bases_check(void *arg)
-{
-	int i, j;
-
-	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
-		const struct engine_info *info = &intel_engines[i];
-		char name[INTEL_ENGINE_CS_MAX_NAME];
-		u8 prev = U8_MAX;
-
-		__sprint_engine_name(name, info);
-
-		for (j = 0; j < MAX_MMIO_BASES; j++) {
-			u8 gen = info->mmio_bases[j].gen;
-			u32 base = info->mmio_bases[j].base;
-
-			if (gen >= prev) {
-				pr_err("%s: %s: mmio base for gen %x "
-				       "is before the one for gen %x\n",
-				       __func__, name, prev, gen);
-				return -EINVAL;
-			}
-
-			if (gen == 0)
-				break;
-
-			if (!base) {
-				pr_err("%s: %s: invalid mmio base (%x) "
-				       "for gen %x at entry %u\n",
-				       __func__, name, base, gen, j);
-				return -EINVAL;
-			}
-
-			prev = gen;
-		}
-
-		pr_info("%s: min gen supported for %s = %d\n",
-			__func__, name, prev);
-	}
-
-	return 0;
-}
-
-int intel_engine_cs_mock_selftests(void)
-{
-	static const struct i915_subtest tests[] = {
-		SUBTEST(intel_mmio_bases_check),
-	};
-
-	return i915_subtests(tests, NULL);
-}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
deleted file mode 100644
index 2fd33aad8683..000000000000
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ /dev/null
@@ -1,1919 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include <linux/kthread.h> - -#include "../i915_selftest.h" -#include "i915_random.h" -#include "igt_flush_test.h" -#include "igt_reset.h" -#include "igt_wedge_me.h" - -#include "mock_context.h" -#include "mock_drm.h" - -#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ - -struct hang { - struct drm_i915_private *i915; - struct drm_i915_gem_object *hws; - struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - u32 *seqno; - u32 *batch; -}; - -static int hang_init(struct hang *h, struct drm_i915_private *i915) -{ - void *vaddr; - int err; - - memset(h, 0, sizeof(*h)); - h->i915 = i915; - - h->ctx = kernel_context(i915); - if (IS_ERR(h->ctx)) - return PTR_ERR(h->ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); - - h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(h->hws)) { - err = PTR_ERR(h->hws); - goto err_ctx; - } - - h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(h->obj)) { - err = PTR_ERR(h->obj); - goto err_hws; - } - - i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - h->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - vaddr = i915_gem_object_pin_map(h->obj, - i915_coherent_map_type(i915)); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - h->batch = vaddr; - - return 0; - -err_unpin_hws: - i915_gem_object_unpin_map(h->hws); -err_obj: - i915_gem_object_put(h->obj); -err_hws: - i915_gem_object_put(h->hws); -err_ctx: - kernel_context_close(h->ctx); - return err; -} - -static u64 hws_address(const struct i915_vma *hws, - const struct i915_request *rq) -{ - return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); -} - -static int move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - int err; - - err = i915_vma_move_to_active(vma, rq, flags); - if (err) - return err; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); - } - - return 0; -} - -static struct i915_request * -hang_create_request(struct hang *h, struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = - h->ctx->ppgtt ? 
&h->ctx->ppgtt->vm : &i915->ggtt.vm; - struct i915_request *rq = NULL; - struct i915_vma *hws, *vma; - unsigned int flags; - u32 *batch; - int err; - - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; - - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - h->obj = obj; - h->batch = vaddr; - } - - vma = i915_vma_instance(h->obj, vm, NULL); - if (IS_ERR(vma)) - return ERR_CAST(vma); - - hws = i915_vma_instance(h->hws, vm, NULL); - if (IS_ERR(hws)) - return ERR_CAST(hws); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return ERR_PTR(err); - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto unpin_vma; - - rq = i915_request_alloc(engine, h->ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin_hws; - } - - err = move_to_active(vma, rq, 0); - if (err) - goto cancel_rq; - - err = move_to_active(hws, rq, 0); - if (err) - goto cancel_rq; - - batch = h->batch; - if (INTEL_GEN(i915) >= 8) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = upper_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; - *batch++ = lower_32_bits(vma->node.start); - *batch++ = upper_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 6) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8; - *batch++ = lower_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(vma->node.start); - } else { - *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(vma->node.start); - } - *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(h->i915); - - if (rq->engine->emit_init_breadcrumb) { - err = rq->engine->emit_init_breadcrumb(rq); - if (err) - goto cancel_rq; - } - - flags = 0; - if (INTEL_GEN(vm->i915) <= 5) - flags |= I915_DISPATCH_SECURE; - - err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); - -cancel_rq: - if (err) { - i915_request_skip(rq, err); - i915_request_add(rq); - } -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); - return err ? 
ERR_PTR(err) : rq; -} - -static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) -{ - return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); -} - -static void hang_fini(struct hang *h) -{ - *h->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(h->i915); - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - i915_gem_object_unpin_map(h->hws); - i915_gem_object_put(h->hws); - - kernel_context_close(h->ctx); - - igt_flush_test(h->i915, I915_WAIT_LOCKED); -} - -static bool wait_until_running(struct hang *h, struct i915_request *rq) -{ - return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 10) && - wait_for(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 1000)); -} - -static int igt_hang_sanitycheck(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_request *rq; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err; - - /* Basic check that we can execute our hanging batch */ - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - for_each_engine(engine, i915, id) { - struct igt_wedge_me w; - long timeout; - - if (!intel_engine_can_store_dword(engine)) - continue; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - pr_err("Failed to create request for %s, err=%d\n", - engine->name, err); - goto fini; - } - - i915_request_get(rq); - - *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); - - i915_request_add(rq); - - timeout = 0; - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) - timeout = i915_request_wait(rq, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) - timeout = -EIO; - - i915_request_put(rq); - - if (timeout < 0) { - err = timeout; - pr_err("Wait for request failed on %s, err=%d\n", - engine->name, err); - goto fini; - } - } - -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int igt_global_reset(void *arg) -{ - struct drm_i915_private *i915 = arg; - unsigned int reset_count; - int err = 0; - - /* Check that we can issue a global GPU reset */ - - igt_global_reset_lock(i915); - - reset_count = i915_reset_count(&i915->gpu_error); - - i915_reset(i915, ALL_ENGINES, NULL); - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - err = -EINVAL; - } - - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - err = -EIO; - - return err; -} - -static int igt_wedged_reset(void *arg) -{ - struct drm_i915_private *i915 = arg; - intel_wakeref_t wakeref; - - /* Check that we can recover a wedged device with a GPU reset */ - - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); - - i915_gem_set_wedged(i915); - - GEM_BUG_ON(!i915_reset_failed(i915)); - i915_reset(i915, ALL_ENGINES, NULL); - - intel_runtime_pm_put(i915, wakeref); - igt_global_reset_unlock(i915); - - return i915_reset_failed(i915) ? 
-EIO : 0; -} - -static bool wait_for_idle(struct intel_engine_cs *engine) -{ - return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; -} - -static int igt_reset_nop(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - unsigned int reset_count, count; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - IGT_TIMEOUT(end_time); - int err = 0; - - /* Check that we can reset during non-user portions of requests */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); - reset_count = i915_reset_count(&i915->gpu_error); - count = 0; - do { - mutex_lock(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) { - int i; - - for (i = 0; i < 16; i++) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - break; - } - - i915_request_add(rq); - } - } - mutex_unlock(&i915->drm.struct_mutex); - - igt_global_reset_lock(i915); - i915_reset(i915, ALL_ENGINES, NULL); - igt_global_reset_unlock(i915); - if (i915_reset_failed(i915)) { - err = -EIO; - break; - } - - if (i915_reset_count(&i915->gpu_error) != - reset_count + ++count) { - pr_err("Full GPU reset not recorded!\n"); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - - err = igt_flush_test(i915, 0); - if (err) - break; - } while (time_before(jiffies, end_time)); - pr_info("%s: %d resets\n", __func__, count); - - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - intel_runtime_pm_put(i915, wakeref); - -out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) - err = -EIO; - return err; -} - -static int igt_reset_nop_engine(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - int err = 0; - - /* Check that we can engine-reset during non-user portions */ - - if (!intel_has_reset_engine(i915)) - return 0; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); - for_each_engine(engine, i915, id) { - unsigned int reset_count, reset_engine_count; - unsigned int count; - IGT_TIMEOUT(end_time); - - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); - count = 0; - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - int i; - - if (!wait_for_idle(engine)) { - pr_err("%s failed to idle before reset\n", - engine->name); - err = -EIO; - break; - } - - mutex_lock(&i915->drm.struct_mutex); - for (i = 0; i < 16; i++) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = 
PTR_ERR(rq); - break; - } - - i915_request_add(rq); - } - mutex_unlock(&i915->drm.struct_mutex); - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine failed\n"); - break; - } - - if (i915_reset_count(&i915->gpu_error) != reset_count) { - pr_err("Full GPU reset recorded! (engine reset expected)\n"); - err = -EINVAL; - break; - } - - if (i915_reset_engine_count(&i915->gpu_error, engine) != - reset_engine_count + ++count) { - pr_err("%s engine reset not recorded!\n", - engine->name); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - pr_info("%s(%s): %d resets\n", __func__, engine->name, count); - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - intel_runtime_pm_put(i915, wakeref); -out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) - err = -EIO; - return err; -} - -static int __igt_reset_engine(struct drm_i915_private *i915, bool active) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err = 0; - - /* Check that we can issue an engine reset on an idle engine (no-op) */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (active) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); - if (err) - return err; - } - - for_each_engine(engine, i915, id) { - unsigned int reset_count, reset_engine_count; - IGT_TIMEOUT(end_time); - - if (active && !intel_engine_can_store_dword(engine)) - continue; - - if (!wait_for_idle(engine)) { - pr_err("%s failed to idle before reset\n", - engine->name); - err = -EIO; - break; - } - - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - if (active) { - struct i915_request *rq; - - mutex_lock(&i915->drm.struct_mutex); - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); - break; - } - - i915_request_get(rq); - i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - err = -EIO; - break; - } - - i915_request_put(rq); - } - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine failed\n"); - break; - } - - if (i915_reset_count(&i915->gpu_error) != reset_count) { - pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); - err = -EINVAL; - break; - } - - if (i915_reset_engine_count(&i915->gpu_error, engine) != - ++reset_engine_count) { - pr_err("%s engine reset not recorded!\n", - engine->name); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - if (i915_reset_failed(i915)) - err = -EIO; - - if (active) { - mutex_lock(&i915->drm.struct_mutex); - hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); - } - - return err; -} - -static int igt_reset_idle_engine(void *arg) -{ - return __igt_reset_engine(arg, false); -} - -static int igt_reset_active_engine(void *arg) -{ - return __igt_reset_engine(arg, true); -} - -struct active_engine { - struct task_struct *task; - struct intel_engine_cs *engine; - unsigned long resets; - unsigned int flags; -}; - -#define TEST_ACTIVE BIT(0) -#define TEST_OTHERS BIT(1) -#define TEST_SELF BIT(2) -#define TEST_PRIORITY BIT(3) - -static int active_request_put(struct i915_request *rq) -{ - int err = 0; - - if (!rq) - return 0; - - if (i915_request_wait(rq, 0, 5 * HZ) < 0) { - GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n", - rq->engine->name, - rq->fence.context, - rq->fence.seqno); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(rq->i915); - err = -EIO; - } - - i915_request_put(rq); - - return err; -} - -static int active_engine(void *data) -{ - I915_RND_STATE(prng); - struct active_engine *arg = data; - struct intel_engine_cs *engine = arg->engine; - struct i915_request *rq[8] = {}; - struct i915_gem_context *ctx[ARRAY_SIZE(rq)]; - struct drm_file *file; - unsigned long count = 0; - int err = 0; - - file = mock_file(engine->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - for (count = 0; count < ARRAY_SIZE(ctx); count++) { - mutex_lock(&engine->i915->drm.struct_mutex); - ctx[count] = live_context(engine->i915, file); - mutex_unlock(&engine->i915->drm.struct_mutex); - if (IS_ERR(ctx[count])) { - err = PTR_ERR(ctx[count]); - while (--count) - i915_gem_context_put(ctx[count]); - goto err_file; - } - } - - while (!kthread_should_stop()) { - unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1); - struct i915_request *old = rq[idx]; - struct i915_request *new; - - mutex_lock(&engine->i915->drm.struct_mutex); - new = i915_request_alloc(engine, ctx[idx]); - if (IS_ERR(new)) { - mutex_unlock(&engine->i915->drm.struct_mutex); - err = PTR_ERR(new); - break; - } - - if (arg->flags & TEST_PRIORITY) - ctx[idx]->sched.priority = - i915_prandom_u32_max_state(512, &prng); - - rq[idx] = i915_request_get(new); - i915_request_add(new); - mutex_unlock(&engine->i915->drm.struct_mutex); - - err = active_request_put(old); - if (err) - break; - - cond_resched(); - } - - for (count = 0; count < ARRAY_SIZE(rq); count++) { - int err__ = active_request_put(rq[count]); - - /* Keep the first error */ - if (!err) - err = err__; - } - -err_file: - mock_file_free(engine->i915, file); - return err; -} - -static int __igt_reset_engines(struct drm_i915_private *i915, - const char *test_name, - unsigned int flags) -{ - struct intel_engine_cs *engine, *other; - enum intel_engine_id id, tmp; - struct hang h; - int err = 0; - - /* Check that issuing a reset on 
one engine does not interfere - * with any other engine. - */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); - if (err) - return err; - - if (flags & TEST_PRIORITY) - h.ctx->sched.priority = 1024; - } - - for_each_engine(engine, i915, id) { - struct active_engine threads[I915_NUM_ENGINES] = {}; - unsigned long global = i915_reset_count(&i915->gpu_error); - unsigned long count = 0, reported; - IGT_TIMEOUT(end_time); - - if (flags & TEST_ACTIVE && - !intel_engine_can_store_dword(engine)) - continue; - - if (!wait_for_idle(engine)) { - pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n", - engine->name, test_name); - err = -EIO; - break; - } - - memset(threads, 0, sizeof(threads)); - for_each_engine(other, i915, tmp) { - struct task_struct *tsk; - - threads[tmp].resets = - i915_reset_engine_count(&i915->gpu_error, - other); - - if (!(flags & TEST_OTHERS)) - continue; - - if (other == engine && !(flags & TEST_SELF)) - continue; - - threads[tmp].engine = other; - threads[tmp].flags = flags; - - tsk = kthread_run(active_engine, &threads[tmp], - "igt/%s", other->name); - if (IS_ERR(tsk)) { - err = PTR_ERR(tsk); - goto unwind; - } - - threads[tmp].task = tsk; - get_task_struct(tsk); - } - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - struct i915_request *rq = NULL; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); - break; - } - - i915_request_get(rq); - i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - err = -EIO; - break; - } - } - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", - engine->name, test_name, err); - break; - } - - count++; - - if (rq) { - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("i915_reset_engine(%s:%s):" - " failed to complete request after reset\n", - engine->name, test_name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - i915_request_put(rq); - - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - break; - } - - i915_request_put(rq); - } - - if (!(flags & TEST_SELF) && !wait_for_idle(engine)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("i915_reset_engine(%s:%s):" - " failed to idle after reset\n", - engine->name, test_name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - pr_info("i915_reset_engine(%s:%s): %lu resets\n", - engine->name, test_name, count); - - reported = i915_reset_engine_count(&i915->gpu_error, engine); - reported -= threads[engine->id].resets; - if (reported != count) { - pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", - engine->name, test_name, count, reported); - if (!err) - err = -EINVAL; - } - -unwind: - for_each_engine(other, i915, tmp) { - int ret; - - if (!threads[tmp].task) - continue; - - ret 
= kthread_stop(threads[tmp].task); - if (ret) { - pr_err("kthread for other engine %s failed, err=%d\n", - other->name, ret); - if (!err) - err = ret; - } - put_task_struct(threads[tmp].task); - - if (other != engine && - threads[tmp].resets != - i915_reset_engine_count(&i915->gpu_error, other)) { - pr_err("Innocent engine %s was reset (count=%ld)\n", - other->name, - i915_reset_engine_count(&i915->gpu_error, - other) - - threads[tmp].resets); - if (!err) - err = -EINVAL; - } - } - - if (global != i915_reset_count(&i915->gpu_error)) { - pr_err("Global reset (count=%ld)!\n", - i915_reset_count(&i915->gpu_error) - global); - if (!err) - err = -EINVAL; - } - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - if (i915_reset_failed(i915)) - err = -EIO; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); - } - - return err; -} - -static int igt_reset_engines(void *arg) -{ - static const struct { - const char *name; - unsigned int flags; - } phases[] = { - { "idle", 0 }, - { "active", TEST_ACTIVE }, - { "others-idle", TEST_OTHERS }, - { "others-active", TEST_OTHERS | TEST_ACTIVE }, - { - "others-priority", - TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY - }, - { - "self-priority", - TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF, - }, - { } - }; - struct drm_i915_private *i915 = arg; - typeof(*phases) *p; - int err; - - for (p = phases; p->name; p++) { - if (p->flags & TEST_PRIORITY) { - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - continue; - } - - err = __igt_reset_engines(arg, p->name, p->flags); - if (err) - return err; - } - - return 0; -} - -static u32 fake_hangcheck(struct drm_i915_private *i915, - intel_engine_mask_t mask) -{ - u32 count = i915_reset_count(&i915->gpu_error); - - i915_reset(i915, mask, NULL); - - return count; -} - -static int igt_reset_wait(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_request *rq; - unsigned int reset_count; - struct hang h; - long timeout; - int err; - - if (!intel_engine_can_store_dword(i915->engine[RCS0])) - return 0; - - /* Check that we detect a stuck waiter and issue a reset */ - - igt_global_reset_lock(i915); - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - rq = hang_create_request(&h, i915->engine[RCS0]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto out_rq; - } - - reset_count = fake_hangcheck(i915, ALL_ENGINES); - - timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); - if (timeout < 0) { - pr_err("i915_request_wait failed on a stuck request: err=%ld\n", - timeout); - err = timeout; - goto out_rq; - } - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - err = -EINVAL; - goto out_rq; - } - -out_rq: - i915_request_put(rq); -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -struct evict_vma { - struct completion completion; - struct i915_vma *vma; -}; - -static int evict_vma(void *data) -{ - struct 
evict_vma *arg = data; - struct i915_address_space *vm = arg->vma->vm; - struct drm_i915_private *i915 = vm->i915; - struct drm_mm_node evict = arg->vma->node; - int err; - - complete(&arg->completion); - - mutex_lock(&i915->drm.struct_mutex); - err = i915_gem_evict_for_node(vm, &evict, 0); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -static int evict_fence(void *data) -{ - struct evict_vma *arg = data; - struct drm_i915_private *i915 = arg->vma->vm->i915; - int err; - - complete(&arg->completion); - - mutex_lock(&i915->drm.struct_mutex); - - /* Mark the fence register as dirty to force the mmio update. */ - err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); - if (err) { - pr_err("Invalid Y-tiling settings; err:%d\n", err); - goto out_unlock; - } - - err = i915_vma_pin_fence(arg->vma); - if (err) { - pr_err("Unable to pin Y-tiled fence; err:%d\n", err); - goto out_unlock; - } - - i915_vma_unpin_fence(arg->vma); - -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -static int __igt_reset_evict_vma(struct drm_i915_private *i915, - struct i915_address_space *vm, - int (*fn)(void *), - unsigned int flags) -{ - struct drm_i915_gem_object *obj; - struct task_struct *tsk = NULL; - struct i915_request *rq; - struct evict_vma arg; - struct hang h; - int err; - - if (!intel_engine_can_store_dword(i915->engine[RCS0])) - return 0; - - /* Check that we can recover an unbind stuck on a hanging request */ - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - obj = i915_gem_object_create_internal(i915, SZ_1M); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto fini; - } - - if (flags & EXEC_OBJECT_NEEDS_FENCE) { - err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512); - if (err) { - pr_err("Invalid X-tiling settings; err:%d\n", err); - goto out_obj; - } - } - - arg.vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(arg.vma)) { - err = PTR_ERR(arg.vma); - goto out_obj; - } - - rq = hang_create_request(&h, i915->engine[RCS0]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_obj; - } - - err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? 
- PIN_GLOBAL | PIN_MAPPABLE : - PIN_USER); - if (err) { - i915_request_add(rq); - goto out_obj; - } - - if (flags & EXEC_OBJECT_NEEDS_FENCE) { - err = i915_vma_pin_fence(arg.vma); - if (err) { - pr_err("Unable to pin X-tiled fence; err:%d\n", err); - i915_vma_unpin(arg.vma); - i915_request_add(rq); - goto out_obj; - } - } - - err = i915_vma_move_to_active(arg.vma, rq, flags); - - if (flags & EXEC_OBJECT_NEEDS_FENCE) - i915_vma_unpin_fence(arg.vma); - i915_vma_unpin(arg.vma); - - i915_request_get(rq); - i915_request_add(rq); - if (err) - goto out_rq; - - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - goto out_reset; - } - - init_completion(&arg.completion); - - tsk = kthread_run(fn, &arg, "igt/evict_vma"); - if (IS_ERR(tsk)) { - err = PTR_ERR(tsk); - tsk = NULL; - goto out_reset; - } - get_task_struct(tsk); - - wait_for_completion(&arg.completion); - - if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("igt/evict_vma kthread did not wait\n"); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - goto out_reset; - } - -out_reset: - igt_global_reset_lock(i915); - fake_hangcheck(rq->i915, rq->engine->mask); - igt_global_reset_unlock(i915); - - if (tsk) { - struct igt_wedge_me w; - - /* The reset, even indirectly, should take less than 10ms. */ - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) - err = kthread_stop(tsk); - - put_task_struct(tsk); - } - - mutex_lock(&i915->drm.struct_mutex); -out_rq: - i915_request_put(rq); -out_obj: - i915_gem_object_put(obj); -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -static int igt_reset_evict_ggtt(void *arg) -{ - struct drm_i915_private *i915 = arg; - - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, - evict_vma, EXEC_OBJECT_WRITE); -} - -static int igt_reset_evict_ppgtt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx; - struct drm_file *file; - int err; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - err = 0; - if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm, - evict_vma, EXEC_OBJECT_WRITE); - -out: - mock_file_free(i915, file); - return err; -} - -static int igt_reset_evict_fence(void *arg) -{ - struct drm_i915_private *i915 = arg; - - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, - evict_fence, EXEC_OBJECT_NEEDS_FENCE); -} - -static int wait_for_others(struct drm_i915_private *i915, - struct intel_engine_cs *exclude) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (engine == exclude) - continue; - - if (!wait_for_idle(engine)) - return -EIO; - } - - return 0; -} - -static int igt_reset_queue(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err; - - /* Check that we replay pending requests 
following a hang */ - - igt_global_reset_lock(i915); - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - for_each_engine(engine, i915, id) { - struct i915_request *prev; - IGT_TIMEOUT(end_time); - unsigned int count; - - if (!intel_engine_can_store_dword(engine)) - continue; - - prev = hang_create_request(&h, engine); - if (IS_ERR(prev)) { - err = PTR_ERR(prev); - goto fini; - } - - i915_request_get(prev); - i915_request_add(prev); - - count = 0; - do { - struct i915_request *rq; - unsigned int reset_count; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - /* - * XXX We don't handle resetting the kernel context - * very well. If we trigger a device reset twice in - * quick succession while the kernel context is - * executing, we may end up skipping the breadcrumb. - * This is really only a problem for the selftest as - * normally there is a large interlude between resets - * (hangcheck), or we focus on resetting just one - * engine and so avoid repeatedly resetting innocents. - */ - err = wait_for_others(i915, engine); - if (err) { - pr_err("%s(%s): Failed to idle other inactive engines after device reset\n", - __func__, engine->name); - i915_request_put(rq); - i915_request_put(prev); - - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - goto fini; - } - - if (!wait_until_running(&h, prev)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s(%s): Failed to start request %llx, at %x\n", - __func__, engine->name, - prev->fence.seqno, hws_seqno(&h, prev)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - i915_request_put(prev); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto fini; - } - - reset_count = fake_hangcheck(i915, BIT(id)); - - if (prev->fence.error != -EIO) { - pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", - prev->fence.error); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - if (rq->fence.error) { - pr_err("Fence error status not zero [%d] after unrelated reset\n", - rq->fence.error); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - i915_request_put(prev); - prev = rq; - count++; - } while (time_before(jiffies, end_time)); - pr_info("%s: Completed %d resets\n", engine->name, count); - - *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); - - i915_request_put(prev); - - err = igt_flush_test(i915, I915_WAIT_LOCKED); - if (err) - break; - } - -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -static int igt_handle_error(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; - struct hang h; - struct i915_request *rq; - struct i915_gpu_state *error; - int err; - - /* Check that we can issue a global GPU and engine reset */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (!engine || !intel_engine_can_store_dword(engine)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - - err = hang_init(&h, i915); - if (err) - goto err_unlock; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - 
err = PTR_ERR(rq); - goto err_fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto err_request; - } - - mutex_unlock(&i915->drm.struct_mutex); - - /* Temporarily disable error capture */ - error = xchg(&i915->gpu_error.first_error, (void *)-1); - - i915_handle_error(i915, engine->mask, 0, NULL); - - xchg(&i915->gpu_error.first_error, error); - - mutex_lock(&i915->drm.struct_mutex); - - if (rq->fence.error != -EIO) { - pr_err("Guilty request not identified!\n"); - err = -EINVAL; - goto err_request; - } - -err_request: - i915_request_put(rq); -err_fini: - hang_fini(&h); -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static void __preempt_begin(void) -{ - preempt_disable(); -} - -static void __preempt_end(void) -{ - preempt_enable(); -} - -static void __softirq_begin(void) -{ - local_bh_disable(); -} - -static void __softirq_end(void) -{ - local_bh_enable(); -} - -static void __hardirq_begin(void) -{ - local_irq_disable(); -} - -static void __hardirq_end(void) -{ - local_irq_enable(); -} - -struct atomic_section { - const char *name; - void (*critical_section_begin)(void); - void (*critical_section_end)(void); -}; - -static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, - const struct atomic_section *p, - const char *mode) -{ - struct tasklet_struct * const t = &engine->execlists.tasklet; - int err; - - GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", - engine->name, mode, p->name); - - tasklet_disable_nosync(t); - p->critical_section_begin(); - - err = i915_reset_engine(engine, NULL); - - p->critical_section_end(); - tasklet_enable(t); - - if (err) - pr_err("i915_reset_engine(%s:%s) failed under %s\n", - engine->name, mode, p->name); - - return err; -} - -static int igt_atomic_reset_engine(struct intel_engine_cs *engine, - const struct atomic_section *p) -{ - struct drm_i915_private *i915 = engine->i915; - struct i915_request *rq; - struct hang h; - int err; - - err = __igt_atomic_reset_engine(engine, p, "idle"); - if (err) - return err; - - err = hang_init(&h, i915); - if (err) - return err; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (wait_until_running(&h, rq)) { - err = __igt_atomic_reset_engine(engine, p, "active"); - } else { - pr_err("%s(%s): Failed to start request %llx, at %x\n", - __func__, engine->name, - rq->fence.seqno, hws_seqno(&h, rq)); - i915_gem_set_wedged(i915); - err = -EIO; - } - - if (err == 0) { - struct igt_wedge_me w; - - igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) - i915_request_wait(rq, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) - err = -EIO; - } - - i915_request_put(rq); -out: - hang_fini(&h); - return err; -} - -static void force_reset(struct drm_i915_private *i915) -{ - i915_gem_set_wedged(i915); - i915_reset(i915, 0, NULL); -} - -static int igt_atomic_reset(void *arg) -{ - static const struct atomic_section phases[] = { - { "preempt", __preempt_begin, __preempt_end }, - { "softirq", __softirq_begin, __softirq_end }, - { "hardirq", __hardirq_begin, __hardirq_end }, - { } - }; - struct drm_i915_private *i915 = arg; - intel_wakeref_t wakeref; - 
int err = 0; - - /* Check that the resets are usable from atomic context */ - - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - /* Flush any requests before we get started and check basics */ - force_reset(i915); - if (i915_reset_failed(i915)) - goto unlock; - - if (intel_has_gpu_reset(i915)) { - const typeof(*phases) *p; - - for (p = phases; p->name; p++) { - GEM_TRACE("intel_gpu_reset under %s\n", p->name); - - p->critical_section_begin(); - err = intel_gpu_reset(i915, ALL_ENGINES); - p->critical_section_end(); - - if (err) { - pr_err("intel_gpu_reset failed under %s\n", - p->name); - goto out; - } - } - - force_reset(i915); - } - - if (USES_GUC_SUBMISSION(i915)) - goto unlock; - - if (intel_has_reset_engine(i915)) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - const typeof(*phases) *p; - - for (p = phases; p->name; p++) { - err = igt_atomic_reset_engine(engine, p); - if (err) - goto out; - } - } - } - -out: - /* As we poke around the guts, do a full reset before continuing. */ - force_reset(i915); - -unlock: - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - return err; -} - -int intel_hangcheck_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_global_reset), /* attempt to recover GPU first */ - SUBTEST(igt_wedged_reset), - SUBTEST(igt_hang_sanitycheck), - SUBTEST(igt_reset_nop), - SUBTEST(igt_reset_nop_engine), - SUBTEST(igt_reset_idle_engine), - SUBTEST(igt_reset_active_engine), - SUBTEST(igt_reset_engines), - SUBTEST(igt_reset_queue), - SUBTEST(igt_reset_wait), - SUBTEST(igt_reset_evict_ggtt), - SUBTEST(igt_reset_evict_ppgtt), - SUBTEST(igt_reset_evict_fence), - SUBTEST(igt_handle_error), - SUBTEST(igt_atomic_reset), - }; - intel_wakeref_t wakeref; - bool saved_hangcheck; - int err; - - if (!intel_has_gpu_reset(i915)) - return 0; - - if (i915_terminally_wedged(i915)) - return -EIO; /* we're long past hope of a successful reset */ - - wakeref = intel_runtime_pm_get(i915); - saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ - - err = i915_subtests(tests, i915); - - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915, wakeref); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c deleted file mode 100644 index fbee030db940..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ /dev/null @@ -1,1326 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#include <linux/prime_numbers.h> - -#include "../i915_reset.h" - -#include "../i915_selftest.h" -#include "igt_flush_test.h" -#include "igt_live_test.h" -#include "igt_spinner.h" -#include "i915_random.h" - -#include "mock_context.h" - -static int live_sanitycheck(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - struct igt_spinner spin; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_CONTEXTS(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if 
(igt_spinner_init(&spin, i915)) - goto err_unlock; - - ctx = kernel_context(i915); - if (!ctx) - goto err_spin; - - for_each_engine(engine, i915, id) { - struct i915_request *rq; - - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin, rq)) { - GEM_TRACE("spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx; - } - - igt_spinner_end(&spin); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { - err = -EIO; - goto err_ctx; - } - } - - err = 0; -err_ctx: - kernel_context_close(ctx); -err_spin: - igt_spinner_fini(&spin); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_busywait_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - u32 *map; - - /* - * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can - * preempt the busywaits used to synchronise between rings. - */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_unlock; - ctx_hi->sched.priority = INT_MAX; - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = INT_MIN; - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_ctx_lo; - } - - map = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto err_obj; - } - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_map; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_map; - - for_each_engine(engine, i915, id) { - struct i915_request *lo, *hi; - struct igt_live_test t; - u32 *cs; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_vma; - } - - /* - * We create two requests. The low priority request - * busywaits on a semaphore (inside the ringbuffer where - * is should be preemptible) and the high priority requests - * uses a MI_STORE_DWORD_IMM to update the semaphore value - * allowing the first request to complete. If preemption - * fails, we hang instead. - */ - - lo = i915_request_alloc(engine, ctx_lo); - if (IS_ERR(lo)) { - err = PTR_ERR(lo); - goto err_vma; - } - - cs = intel_ring_begin(lo, 8); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(lo); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 1; - - /* XXX Do we need a flush + invalidate here? 
*/ - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - - intel_ring_advance(lo, cs); - i915_request_add(lo); - - if (wait_for(READ_ONCE(*map), 10)) { - err = -ETIMEDOUT; - goto err_vma; - } - - /* Low priority request should be busywaiting now */ - if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) { - pr_err("%s: Busywaiting request did not!\n", - engine->name); - err = -EIO; - goto err_vma; - } - - hi = i915_request_alloc(engine, ctx_hi); - if (IS_ERR(hi)) { - err = PTR_ERR(hi); - goto err_vma; - } - - cs = intel_ring_begin(hi, 4); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(hi); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 0; - - intel_ring_advance(hi, cs); - i915_request_add(hi); - - if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to preempt semaphore busywait!\n", - engine->name); - - intel_engine_dump(engine, &p, "%s\n", engine->name); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(i915); - err = -EIO; - goto err_vma; - } - GEM_BUG_ON(READ_ONCE(*map)); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_vma; - } - } - - err = 0; -err_vma: - i915_vma_unpin(vma); -err_map: - i915_gem_object_unpin_map(obj); -err_obj: - i915_gem_object_put(obj); -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) - pr_err("Logical preemption supported, but not exposed\n"); - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, i915, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_hi, rq)) { - 
GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_late_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - struct i915_sched_attr attr = {}; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - - for_each_engine(engine, i915, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - pr_err("First context failed to start\n"); - goto err_wedged; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_NOOP); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("Second context overtook first?\n"); - goto err_wedged; - } - - attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); - engine->schedule(rq, &attr); - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("High priority context failed to preempt the low priority context\n"); - GEM_TRACE_DUMP(); - goto err_wedged; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; -} - -struct preempt_client { - struct igt_spinner spin; - struct i915_gem_context *ctx; -}; - -static int preempt_client_init(struct drm_i915_private *i915, - struct preempt_client *c) -{ - c->ctx = kernel_context(i915); - if (!c->ctx) - return -ENOMEM; - - if (igt_spinner_init(&c->spin, i915)) - goto err_ctx; - - return 0; - -err_ctx: - kernel_context_close(c->ctx); - return -ENOMEM; -} - -static void preempt_client_fini(struct preempt_client *c) -{ - igt_spinner_fini(&c->spin); - kernel_context_close(c->ctx); -} - 
-static int live_suppress_self_preempt(void *arg)
-{
-	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine;
-	struct i915_sched_attr attr = {
-		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
-	};
-	struct preempt_client a, b;
-	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
-	int err = -ENOMEM;
-
-	/*
-	 * Verify that if a preemption request does not cause a change in
-	 * the current execution order, the preempt-to-idle injection is
-	 * skipped and that we do not accidentally apply it after the CS
-	 * completion event.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
-		return 0;
-
-	if (USES_GUC_SUBMISSION(i915))
-		return 0; /* presume black box */
-
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
-
-	if (preempt_client_init(i915, &a))
-		goto err_unlock;
-	if (preempt_client_init(i915, &b))
-		goto err_client_a;
-
-	for_each_engine(engine, i915, id) {
-		struct i915_request *rq_a, *rq_b;
-		int depth;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		engine->execlists.preempt_hang.count = 0;
-
-		rq_a = igt_spinner_create_request(&a.spin,
-						  a.ctx, engine,
-						  MI_NOOP);
-		if (IS_ERR(rq_a)) {
-			err = PTR_ERR(rq_a);
-			goto err_client_b;
-		}
-
-		i915_request_add(rq_a);
-		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
-			pr_err("First client failed to start\n");
-			goto err_wedged;
-		}
-
-		for (depth = 0; depth < 8; depth++) {
-			rq_b = igt_spinner_create_request(&b.spin,
-							  b.ctx, engine,
-							  MI_NOOP);
-			if (IS_ERR(rq_b)) {
-				err = PTR_ERR(rq_b);
-				goto err_client_b;
-			}
-			i915_request_add(rq_b);
-
-			GEM_BUG_ON(i915_request_completed(rq_a));
-			engine->schedule(rq_a, &attr);
-			igt_spinner_end(&a.spin);
-
-			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
-				pr_err("Second client failed to start\n");
-				goto err_wedged;
-			}
-
-			swap(a, b);
-			rq_a = rq_b;
-		}
-		igt_spinner_end(&a.spin);
-
-		if (engine->execlists.preempt_hang.count) {
-			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
-			       engine->execlists.preempt_hang.count,
-			       depth);
-			err = -EINVAL;
-			goto err_client_b;
-		}
-
-		if (igt_flush_test(i915, I915_WAIT_LOCKED))
-			goto err_wedged;
-	}
-
-	err = 0;
-err_client_b:
-	preempt_client_fini(&b);
-err_client_a:
-	preempt_client_fini(&a);
-err_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
-	intel_runtime_pm_put(i915, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
-
-err_wedged:
-	igt_spinner_end(&b.spin);
-	igt_spinner_end(&a.spin);
-	i915_gem_set_wedged(i915);
-	err = -EIO;
-	goto err_client_b;
-}
-
-static int __i915_sw_fence_call
-dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	return NOTIFY_DONE;
-}
-
-static struct i915_request *dummy_request(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
-	if (!rq)
-		return NULL;
-
-	INIT_LIST_HEAD(&rq->active_list);
-	rq->engine = engine;
-
-	i915_sched_node_init(&rq->sched);
-
-	/* mark this request as permanently incomplete */
-	rq->fence.seqno = 1;
-	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
-	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
-	GEM_BUG_ON(i915_request_completed(rq));
-
-	i915_sw_fence_init(&rq->submit, dummy_notify);
-	i915_sw_fence_commit(&rq->submit);
-
-	return rq;
-}
-
-static void dummy_request_free(struct i915_request *dummy)
-{
-	i915_request_mark_complete(dummy);
-	i915_sched_node_fini(&dummy->sched);
-	i915_sw_fence_fini(&dummy->submit);
-
-	dma_fence_free(&dummy->fence);
-}
-
-static int
live_suppress_wait_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct preempt_client client[4]; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - int i; - - /* - * Waiters are given a little priority nudge, but not enough - * to actually cause any preemption. Double check that we do - * not needlessly generate preempt-to-idle cycles. - */ - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ - goto err_unlock; - if (preempt_client_init(i915, &client[1])) /* ELSP[1] */ - goto err_client_0; - if (preempt_client_init(i915, &client[2])) /* head of queue */ - goto err_client_1; - if (preempt_client_init(i915, &client[3])) /* bystander */ - goto err_client_2; - - for_each_engine(engine, i915, id) { - int depth; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (!engine->emit_init_breadcrumb) - continue; - - for (depth = 0; depth < ARRAY_SIZE(client); depth++) { - struct i915_request *rq[ARRAY_SIZE(client)]; - struct i915_request *dummy; - - engine->execlists.preempt_hang.count = 0; - - dummy = dummy_request(engine); - if (!dummy) - goto err_client_3; - - for (i = 0; i < ARRAY_SIZE(client); i++) { - rq[i] = igt_spinner_create_request(&client[i].spin, - client[i].ctx, engine, - MI_NOOP); - if (IS_ERR(rq[i])) { - err = PTR_ERR(rq[i]); - goto err_wedged; - } - - /* Disable NEWCLIENT promotion */ - __i915_active_request_set(&rq[i]->timeline->last_request, - dummy); - i915_request_add(rq[i]); - } - - dummy_request_free(dummy); - - GEM_BUG_ON(i915_request_completed(rq[0])); - if (!igt_wait_for_spinner(&client[0].spin, rq[0])) { - pr_err("%s: First client failed to start\n", - engine->name); - goto err_wedged; - } - GEM_BUG_ON(!i915_request_started(rq[0])); - - if (i915_request_wait(rq[depth], - I915_WAIT_LOCKED | - I915_WAIT_PRIORITY, - 1) != -ETIME) { - pr_err("%s: Waiter depth:%d completed!\n", - engine->name, depth); - goto err_wedged; - } - - for (i = 0; i < ARRAY_SIZE(client); i++) - igt_spinner_end(&client[i].spin); - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - goto err_wedged; - - if (engine->execlists.preempt_hang.count) { - pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n", - engine->name, - engine->execlists.preempt_hang.count, - depth); - err = -EINVAL; - goto err_client_3; - } - } - } - - err = 0; -err_client_3: - preempt_client_fini(&client[3]); -err_client_2: - preempt_client_fini(&client[2]); -err_client_1: - preempt_client_fini(&client[1]); -err_client_0: - preempt_client_fini(&client[0]); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - for (i = 0; i < ARRAY_SIZE(client); i++) - igt_spinner_end(&client[i].spin); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_client_3; -} - -static int live_chain_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct preempt_client hi, lo; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - /* - * Build a chain AB...BA between two contexts (A, B) and request - * preemption of the last request. It should then complete before - * the previously submitted spinner in B. 
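- * Preempting the tail must therefore hoist it past the entire queue of
- * earlier low priority requests in one decision, not merely past the
- * request immediately ahead of it.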
- */ - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (preempt_client_init(i915, &hi)) - goto err_unlock; - - if (preempt_client_init(i915, &lo)) - goto err_client_hi; - - for_each_engine(engine, i915, id) { - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), - }; - struct igt_live_test t; - struct i915_request *rq; - int ring_size, count, i; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - - ring_size = rq->wa_tail - rq->head; - if (ring_size < 0) - ring_size += rq->ring->size; - ring_size = rq->ring->size / ring_size; - pr_debug("%s(%s): Using maximum of %d requests\n", - __func__, engine->name, ring_size); - - igt_spinner_end(&lo.spin); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) { - pr_err("Timed out waiting to flush %s\n", engine->name); - goto err_wedged; - } - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_wedged; - } - - for_each_prime_number_from(count, 1, ring_size) { - rq = igt_spinner_create_request(&hi.spin, - hi.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - if (!igt_wait_for_spinner(&hi.spin, rq)) - goto err_wedged; - - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - - for (i = 0; i < count; i++) { - rq = i915_request_alloc(engine, lo.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - } - - rq = i915_request_alloc(engine, hi.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - engine->schedule(rq, &attr); - - igt_spinner_end(&hi.spin); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("Failed to preempt over chain of %d\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - goto err_wedged; - } - igt_spinner_end(&lo.spin); - - rq = i915_request_alloc(engine, lo.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("Failed to flush low priority chain of %d requests\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - goto err_wedged; - } - } - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_wedged; - } - } - - err = 0; -err_client_lo: - preempt_client_fini(&lo); -err_client_hi: - preempt_client_fini(&hi); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - igt_spinner_end(&hi.spin); - igt_spinner_end(&lo.spin); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_client_lo; -} - -static int live_preempt_hang(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - if (!intel_has_reset_engine(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - 
goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; - - for_each_engine(engine, i915, id) { - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - init_completion(&engine->execlists.preempt_hang.completion); - engine->execlists.preempt_hang.inject_hang = true; - - i915_request_add(rq); - - if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, - HZ / 10)) { - pr_err("Preemption did not occur within timeout!"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - i915_reset_engine(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - - engine->execlists.preempt_hang.inject_hang = false; - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int random_range(struct rnd_state *rnd, int min, int max) -{ - return i915_prandom_u32_max_state(max - min, rnd) + min; -} - -static int random_priority(struct rnd_state *rnd) -{ - return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); -} - -struct preempt_smoke { - struct drm_i915_private *i915; - struct i915_gem_context **contexts; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *batch; - unsigned int ncontext; - struct rnd_state prng; - unsigned long count; -}; - -static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) -{ - return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, - &smoke->prng)]; -} - -static int smoke_submit(struct preempt_smoke *smoke, - struct i915_gem_context *ctx, int prio, - struct drm_i915_gem_object *batch) -{ - struct i915_request *rq; - struct i915_vma *vma = NULL; - int err = 0; - - if (batch) { - vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - } - - ctx->sched.priority = prio; - - rq = i915_request_alloc(smoke->engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin; - } - - if (vma) { - err = rq->engine->emit_bb_start(rq, - vma->node.start, - PAGE_SIZE, 0); - if 
(!err)
-			err = i915_vma_move_to_active(vma, rq, 0);
-	}
-
-	i915_request_add(rq);
-
-unpin:
-	if (vma)
-		i915_vma_unpin(vma);
-
-	return err;
-}
-
-static int smoke_crescendo_thread(void *arg)
-{
-	struct preempt_smoke *smoke = arg;
-	IGT_TIMEOUT(end_time);
-	unsigned long count;
-
-	count = 0;
-	do {
-		struct i915_gem_context *ctx = smoke_context(smoke);
-		int err;
-
-		mutex_lock(&smoke->i915->drm.struct_mutex);
-		err = smoke_submit(smoke,
-				   ctx, count % I915_PRIORITY_MAX,
-				   smoke->batch);
-		mutex_unlock(&smoke->i915->drm.struct_mutex);
-		if (err)
-			return err;
-
-		count++;
-	} while (!__igt_timeout(end_time, NULL));
-
-	smoke->count = count;
-	return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
-	struct task_struct *tsk[I915_NUM_ENGINES] = {};
-	struct preempt_smoke arg[I915_NUM_ENGINES];
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	unsigned long count;
-	int err = 0;
-
-	mutex_unlock(&smoke->i915->drm.struct_mutex);
-
-	for_each_engine(engine, smoke->i915, id) {
-		arg[id] = *smoke;
-		arg[id].engine = engine;
-		if (!(flags & BATCH))
-			arg[id].batch = NULL;
-		arg[id].count = 0;
-
-		/* each thread must be handed its own per-engine argument */
-		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
-				      "igt/smoke:%d", id);
-		if (IS_ERR(tsk[id])) {
-			err = PTR_ERR(tsk[id]);
-			break;
-		}
-		get_task_struct(tsk[id]);
-	}
-
-	count = 0;
-	for_each_engine(engine, smoke->i915, id) {
-		int status;
-
-		if (IS_ERR_OR_NULL(tsk[id]))
-			continue;
-
-		status = kthread_stop(tsk[id]);
-		if (status && !err)
-			err = status;
-
-		count += arg[id].count;
-
-		put_task_struct(tsk[id]);
-	}
-
-	mutex_lock(&smoke->i915->drm.struct_mutex);
-
-	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
-		count, flags,
-		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
-	return err;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
-	enum intel_engine_id id;
-	IGT_TIMEOUT(end_time);
-	unsigned long count;
-
-	count = 0;
-	do {
-		for_each_engine(smoke->engine, smoke->i915, id) {
-			struct i915_gem_context *ctx = smoke_context(smoke);
-			int err;
-
-			err = smoke_submit(smoke,
-					   ctx, random_priority(&smoke->prng),
-					   flags & BATCH ?
smoke->batch : NULL); - if (err) - return err; - - count++; - } - } while (!__igt_timeout(end_time, NULL)); - - pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); - return 0; -} - -static int live_preempt_smoke(void *arg) -{ - struct preempt_smoke smoke = { - .i915 = arg, - .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), - .ncontext = 1024, - }; - const unsigned int phase[] = { 0, BATCH }; - intel_wakeref_t wakeref; - struct igt_live_test t; - int err = -ENOMEM; - u32 *cs; - int n; - - if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915)) - return 0; - - smoke.contexts = kmalloc_array(smoke.ncontext, - sizeof(*smoke.contexts), - GFP_KERNEL); - if (!smoke.contexts) - return -ENOMEM; - - mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(smoke.i915); - - smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); - if (IS_ERR(smoke.batch)) { - err = PTR_ERR(smoke.batch); - goto err_unlock; - } - - cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_batch; - } - for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) - cs[n] = MI_ARB_CHECK; - cs[n] = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(smoke.batch); - i915_gem_object_unpin_map(smoke.batch); - - if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) { - err = -EIO; - goto err_batch; - } - - for (n = 0; n < smoke.ncontext; n++) { - smoke.contexts[n] = kernel_context(smoke.i915); - if (!smoke.contexts[n]) - goto err_ctx; - } - - for (n = 0; n < ARRAY_SIZE(phase); n++) { - err = smoke_crescendo(&smoke, phase[n]); - if (err) - goto err_ctx; - - err = smoke_random(&smoke, phase[n]); - if (err) - goto err_ctx; - } - -err_ctx: - if (igt_live_test_end(&t)) - err = -EIO; - - for (n = 0; n < smoke.ncontext; n++) { - if (!smoke.contexts[n]) - break; - kernel_context_close(smoke.contexts[n]); - } - -err_batch: - i915_gem_object_put(smoke.batch); -err_unlock: - intel_runtime_pm_put(smoke.i915, wakeref); - mutex_unlock(&smoke.i915->drm.struct_mutex); - kfree(smoke.contexts); - - return err; -} - -int intel_execlists_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_sanitycheck), - SUBTEST(live_busywait_preempt), - SUBTEST(live_preempt), - SUBTEST(live_late_preempt), - SUBTEST(live_suppress_self_preempt), - SUBTEST(live_suppress_wait_preempt), - SUBTEST(live_chain_preempt), - SUBTEST(live_preempt_hang), - SUBTEST(live_preempt_smoke), - }; - - if (!HAS_EXECLISTS(i915)) - return 0; - - if (i915_terminally_wedged(i915)) - return 0; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c deleted file mode 100644 index aa841e4d3031..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ /dev/null @@ -1,1172 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#include "../i915_selftest.h" -#include "../i915_reset.h" - -#include "igt_flush_test.h" -#include "igt_reset.h" -#include "igt_spinner.h" -#include "igt_wedge_me.h" -#include "mock_context.h" -#include "mock_drm.h" - -static const struct wo_register { - enum intel_platform platform; - u32 reg; -} wo_registers[] = { - { INTEL_GEMINILAKE, 0x731c } -}; - -#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4) -struct wa_lists { - struct i915_wa_list gt_wa_list; - struct { - char 
name[REF_NAME_MAX]; - struct i915_wa_list wa_list; - } engine[I915_NUM_ENGINES]; -}; - -static void -reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - memset(lists, 0, sizeof(*lists)); - - wa_init_start(&lists->gt_wa_list, "GT_REF"); - gt_init_workarounds(i915, &lists->gt_wa_list); - wa_init_finish(&lists->gt_wa_list); - - for_each_engine(engine, i915, id) { - struct i915_wa_list *wal = &lists->engine[id].wa_list; - char *name = lists->engine[id].name; - - snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); - - wa_init_start(wal, name); - engine_init_workarounds(engine, wal); - wa_init_finish(wal); - } -} - -static void -reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - intel_wa_list_free(&lists->engine[id].wa_list); - - intel_wa_list_free(&lists->gt_wa_list); -} - -static struct drm_i915_gem_object * -read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) -{ - const u32 base = engine->mmio_base; - struct drm_i915_gem_object *result; - intel_wakeref_t wakeref; - struct i915_request *rq; - struct i915_vma *vma; - u32 srm, *cs; - int err; - int i; - - result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(result)) - return result; - - i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC); - - cs = i915_gem_object_pin_map(result, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_obj; - } - memset(cs, 0xc5, PAGE_SIZE); - i915_gem_object_flush_map(result); - i915_gem_object_unpin_map(result); - - vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_obj; - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_pin; - } - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto err_req; - - srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(ctx->i915) >= 8) - srm++; - - cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_req; - } - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - *cs++ = srm; - *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i)); - *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; - *cs++ = 0; - } - intel_ring_advance(rq, cs); - - i915_gem_object_get(result); - i915_gem_object_set_active_reference(result); - - i915_request_add(rq); - i915_vma_unpin(vma); - - return result; - -err_req: - i915_request_add(rq); -err_pin: - i915_vma_unpin(vma); -err_obj: - i915_gem_object_put(result); - return ERR_PTR(err); -} - -static u32 -get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) -{ - i915_reg_t reg = i < engine->whitelist.count ? 
- engine->whitelist.list[i].reg : - RING_NOPID(engine->mmio_base); - - return i915_mmio_reg_offset(reg); -} - -static void -print_results(const struct intel_engine_cs *engine, const u32 *results) -{ - unsigned int i; - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(engine, i); - u32 actual = results[i]; - - pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", - i, expected, actual); - } -} - -static int check_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *results; - struct igt_wedge_me wedge; - u32 *vaddr; - int err; - int i; - - results = read_nonprivs(ctx, engine); - if (IS_ERR(results)) - return PTR_ERR(results); - - err = 0; - igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ - err = i915_gem_object_set_to_cpu_domain(results, false); - if (i915_terminally_wedged(ctx->i915)) - err = -EIO; - if (err) - goto out_put; - - vaddr = i915_gem_object_pin_map(results, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out_put; - } - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(engine, i); - u32 actual = vaddr[i]; - - if (expected != actual) { - print_results(engine, vaddr); - pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", - i, expected, actual); - - err = -EINVAL; - break; - } - } - - i915_gem_object_unpin_map(results); -out_put: - i915_gem_object_put(results); - return err; -} - -static int do_device_reset(struct intel_engine_cs *engine) -{ - i915_reset(engine->i915, engine->mask, "live_workarounds"); - return 0; -} - -static int do_engine_reset(struct intel_engine_cs *engine) -{ - return i915_reset_engine(engine, "live_workarounds"); -} - -static int -switch_to_scratch_context(struct intel_engine_cs *engine, - struct igt_spinner *spin) -{ - struct i915_gem_context *ctx; - struct i915_request *rq; - intel_wakeref_t wakeref; - int err = 0; - - ctx = kernel_context(engine->i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - - kernel_context_close(ctx); - - if (IS_ERR(rq)) { - spin = NULL; - err = PTR_ERR(rq); - goto err; - } - - i915_request_add(rq); - - if (spin && !igt_wait_for_spinner(spin, rq)) { - pr_err("Spinner failed to start\n"); - err = -ETIMEDOUT; - } - -err: - if (err && spin) - igt_spinner_end(spin); - - return err; -} - -static int check_whitelist_across_reset(struct intel_engine_cs *engine, - int (*reset)(struct intel_engine_cs *), - const char *name) -{ - struct drm_i915_private *i915 = engine->i915; - struct i915_gem_context *ctx; - struct igt_spinner spin; - intel_wakeref_t wakeref; - int err; - - pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", - engine->whitelist.count, name); - - err = igt_spinner_init(&spin, i915); - if (err) - return err; - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Invalid whitelist *before* %s reset!\n", name); - goto out; - } - - err = switch_to_scratch_context(engine, &spin); - if (err) - goto out; - - with_intel_runtime_pm(i915, wakeref) - err = reset(engine); - - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - - if (err) { - pr_err("%s reset failed\n", name); - goto out; - } - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Whitelist not 
preserved in context across %s reset!\n", - name); - goto out; - } - - kernel_context_close(ctx); - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Invalid whitelist *after* %s reset in fresh context!\n", - name); - goto out; - } - -out: - kernel_context_close(ctx); - return err; -} - -static struct i915_vma *create_batch(struct i915_gem_context *ctx) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int err; - - obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err_obj; - - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err_obj; - - return vma; - -err_obj: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - -static u32 reg_write(u32 old, u32 new, u32 rsvd) -{ - if (rsvd == 0x0000ffff) { - old &= ~(new >> 16); - old |= new & (new >> 16); - } else { - old &= ~rsvd; - old |= new & rsvd; - } - - return old; -} - -static bool wo_register(struct intel_engine_cs *engine, u32 reg) -{ - enum intel_platform platform = INTEL_INFO(engine->i915)->platform; - int i; - - for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { - if (wo_registers[i].platform == platform && - wo_registers[i].reg == reg) - return true; - } - - return false; -} - -static int check_dirty_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - const u32 values[] = { - 0x00000000, - 0x01010101, - 0x10100101, - 0x03030303, - 0x30300303, - 0x05050505, - 0x50500505, - 0x0f0f0f0f, - 0xf00ff00f, - 0x10101010, - 0xf0f01010, - 0x30303030, - 0xa0a03030, - 0x50505050, - 0xc0c05050, - 0xf0f0f0f0, - 0x11111111, - 0x33333333, - 0x55555555, - 0x0000ffff, - 0x00ff00ff, - 0xff0000ff, - 0xffff00ff, - 0xffffffff, - }; - struct i915_vma *scratch; - struct i915_vma *batch; - int err = 0, i, v; - u32 *cs, *results; - - scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1); - if (IS_ERR(scratch)) - return PTR_ERR(scratch); - - batch = create_batch(ctx); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_scratch; - } - - for (i = 0; i < engine->whitelist.count; i++) { - u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - u64 addr = scratch->node.start; - struct i915_request *rq; - u32 srm, lrm, rsvd; - u32 expect; - int idx; - - if (wo_register(engine, reg)) - continue; - - srm = MI_STORE_REGISTER_MEM; - lrm = MI_LOAD_REGISTER_MEM; - if (INTEL_GEN(ctx->i915) >= 8) - lrm++, srm++; - - pr_debug("%s: Writing garbage to %x\n", - engine->name, reg); - - cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto out_batch; - } - - /* SRM original */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr); - *cs++ = upper_32_bits(addr); - - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - /* LRI garbage */ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = reg; - *cs++ = values[v]; - - /* SRM result */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr + sizeof(u32) * idx); - *cs++ = upper_32_bits(addr + sizeof(u32) * idx); - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - /* LRI garbage */ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = reg; - *cs++ = ~values[v]; - - /* SRM result */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr + sizeof(u32) * idx); - *cs++ = 
upper_32_bits(addr + sizeof(u32) * idx); - idx++; - } - GEM_BUG_ON(idx * sizeof(u32) > scratch->size); - - /* LRM original -- don't leave garbage in the context! */ - *cs++ = lrm; - *cs++ = reg; - *cs++ = lower_32_bits(addr); - *cs++ = upper_32_bits(addr); - - *cs++ = MI_BATCH_BUFFER_END; - - i915_gem_object_flush_map(batch->obj); - i915_gem_object_unpin_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_batch; - } - - if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ - err = engine->emit_init_breadcrumb(rq); - if (err) - goto err_request; - } - - err = engine->emit_bb_start(rq, - batch->node.start, PAGE_SIZE, - 0); - if (err) - goto err_request; - -err_request: - i915_request_add(rq); - if (err) - goto out_batch; - - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - pr_err("%s: Futzing %x timedout; cancelling test\n", - engine->name, reg); - i915_gem_set_wedged(ctx->i915); - err = -EIO; - goto out_batch; - } - - results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); - if (IS_ERR(results)) { - err = PTR_ERR(results); - goto out_batch; - } - - GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); - rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ - if (!rsvd) { - pr_err("%s: Unable to write to whitelisted register %x\n", - engine->name, reg); - err = -EINVAL; - goto out_unpin; - } - - expect = results[0]; - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, values[v], rsvd); - if (results[idx] != expect) - err++; - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, ~values[v], rsvd); - if (results[idx] != expect) - err++; - idx++; - } - if (err) { - pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", - engine->name, err, reg); - - pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", - engine->name, reg, results[0], rsvd); - - expect = results[0]; - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - u32 w = values[v]; - - expect = reg_write(expect, w, rsvd); - pr_info("Wrote %08x, read %08x, expect %08x\n", - w, results[idx], expect); - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - u32 w = ~values[v]; - - expect = reg_write(expect, w, rsvd); - pr_info("Wrote %08x, read %08x, expect %08x\n", - w, results[idx], expect); - idx++; - } - - err = -EINVAL; - } -out_unpin: - i915_gem_object_unpin_map(scratch->obj); - if (err) - break; - } - - if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED)) - err = -EIO; -out_batch: - i915_vma_unpin_and_release(&batch, 0); -out_scratch: - i915_vma_unpin_and_release(&scratch, 0); - return err; -} - -static int live_dirty_whitelist(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - int err = 0; - - /* Can the user write to the whitelisted registers? 
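- *
- * Strategy: from an unprivileged batch, write a series of garbage
- * values (and their complements) to each whitelisted register with
- * LRI, reading each result back with SRM. The final 0xffffffff write
- * exposes which bits are masked or read-only (rsvd), so the read-back
- * values can be checked against the expected read-modify-write result.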
*/ - - if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ - return 0; - - wakeref = intel_runtime_pm_get(i915); - - mutex_unlock(&i915->drm.struct_mutex); - file = mock_file(i915); - mutex_lock(&i915->drm.struct_mutex); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto out_rpm; - } - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - - for_each_engine(engine, i915, id) { - if (engine->whitelist.count == 0) - continue; - - err = check_dirty_whitelist(ctx, engine); - if (err) - goto out_file; - } - -out_file: - mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(i915, file); - mutex_lock(&i915->drm.struct_mutex); -out_rpm: - intel_runtime_pm_put(i915, wakeref); - return err; -} - -static int live_reset_whitelist(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; - int err = 0; - - /* If we reset the gpu, we should not lose the RING_NONPRIV */ - - if (!engine || engine->whitelist.count == 0) - return 0; - - igt_global_reset_lock(i915); - - if (intel_has_reset_engine(i915)) { - err = check_whitelist_across_reset(engine, - do_engine_reset, - "engine"); - if (err) - goto out; - } - - if (intel_has_gpu_reset(i915)) { - err = check_whitelist_across_reset(engine, - do_device_reset, - "device"); - if (err) - goto out; - } - -out: - igt_global_reset_unlock(i915); - return err; -} - -static int read_whitelisted_registers(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct i915_vma *results) -{ - intel_wakeref_t wakeref; - struct i915_request *rq; - int i, err = 0; - u32 srm, *cs; - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - srm = MI_STORE_REGISTER_MEM; - if (INTEL_GEN(ctx->i915) >= 8) - srm++; - - cs = intel_ring_begin(rq, 4 * engine->whitelist.count); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_req; - } - - for (i = 0; i < engine->whitelist.count; i++) { - u64 offset = results->node.start + sizeof(u32) * i; - - *cs++ = srm; - *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - } - intel_ring_advance(rq, cs); - -err_req: - i915_request_add(rq); - - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) - err = -EIO; - - return err; -} - -static int scrub_whitelisted_registers(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - intel_wakeref_t wakeref; - struct i915_request *rq; - struct i915_vma *batch; - int i, err = 0; - u32 *cs; - - batch = create_batch(ctx); - if (IS_ERR(batch)) - return PTR_ERR(batch); - - cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_batch; - } - - *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count); - for (i = 0; i < engine->whitelist.count; i++) { - *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - *cs++ = 0xffffffff; - } - *cs++ = MI_BATCH_BUFFER_END; - - i915_gem_object_flush_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_unpin; - } - - if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ - err = engine->emit_init_breadcrumb(rq); - if (err) - goto err_request; - } - - /* Perform the writes from an unprivileged "user" batch */ - err = engine->emit_bb_start(rq, 
batch->node.start, 0, 0); - -err_request: - i915_request_add(rq); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) - err = -EIO; - -err_unpin: - i915_gem_object_unpin_map(batch->obj); -err_batch: - i915_vma_unpin_and_release(&batch, 0); - return err; -} - -struct regmask { - i915_reg_t reg; - unsigned long gen_mask; -}; - -static bool find_reg(struct drm_i915_private *i915, - i915_reg_t reg, - const struct regmask *tbl, - unsigned long count) -{ - u32 offset = i915_mmio_reg_offset(reg); - - while (count--) { - if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask && - i915_mmio_reg_offset(tbl->reg) == offset) - return true; - tbl++; - } - - return false; -} - -static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg) -{ - /* Alas, we must pardon some whitelists. Mistakes already made */ - static const struct regmask pardon[] = { - { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) }, - { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) }, - }; - - return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon)); -} - -static bool result_eq(struct intel_engine_cs *engine, - u32 a, u32 b, i915_reg_t reg) -{ - if (a != b && !pardon_reg(engine->i915, reg)) { - pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n", - i915_mmio_reg_offset(reg), a, b); - return false; - } - - return true; -} - -static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg) -{ - /* Some registers do not seem to behave and our writes unreadable */ - static const struct regmask wo[] = { - { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) }, - }; - - return find_reg(i915, reg, wo, ARRAY_SIZE(wo)); -} - -static bool result_neq(struct intel_engine_cs *engine, - u32 a, u32 b, i915_reg_t reg) -{ - if (a == b && !writeonly_reg(engine->i915, reg)) { - pr_err("Whitelist register 0x%4x:%08x was unwritable\n", - i915_mmio_reg_offset(reg), a); - return false; - } - - return true; -} - -static int -check_whitelisted_registers(struct intel_engine_cs *engine, - struct i915_vma *A, - struct i915_vma *B, - bool (*fn)(struct intel_engine_cs *engine, - u32 a, u32 b, - i915_reg_t reg)) -{ - u32 *a, *b; - int i, err; - - a = i915_gem_object_pin_map(A->obj, I915_MAP_WB); - if (IS_ERR(a)) - return PTR_ERR(a); - - b = i915_gem_object_pin_map(B->obj, I915_MAP_WB); - if (IS_ERR(b)) { - err = PTR_ERR(b); - goto err_a; - } - - err = 0; - for (i = 0; i < engine->whitelist.count; i++) { - if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) - err = -EINVAL; - } - - i915_gem_object_unpin_map(B->obj); -err_a: - i915_gem_object_unpin_map(A->obj); - return err; -} - -static int live_isolated_whitelist(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct { - struct i915_gem_context *ctx; - struct i915_vma *scratch[2]; - } client[2] = {}; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int i, err = 0; - - /* - * Check that a write into a whitelist register works, but - * invisible to a second context. 
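- * That is, whitelisted registers must still be saved and restored as
- * part of the context image, so one client's writes never leak into
- * another client's context.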
- */
-
-	if (!intel_engines_has_context_isolation(i915))
-		return 0;
-
-	if (!i915->kernel_context->ppgtt)
-		return 0;
-
-	for (i = 0; i < ARRAY_SIZE(client); i++) {
-		struct i915_gem_context *c;
-
-		c = kernel_context(i915);
-		if (IS_ERR(c)) {
-			err = PTR_ERR(c);
-			goto err;
-		}
-
-		client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
-		if (IS_ERR(client[i].scratch[0])) {
-			err = PTR_ERR(client[i].scratch[0]);
-			kernel_context_close(c);
-			goto err;
-		}
-
-		client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
-		if (IS_ERR(client[i].scratch[1])) {
-			err = PTR_ERR(client[i].scratch[1]);
-			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
-			kernel_context_close(c);
-			goto err;
-		}
-
-		client[i].ctx = c;
-	}
-
-	for_each_engine(engine, i915, id) {
-		if (!engine->whitelist.count)
-			continue;
-
-		/* Read default values */
-		err = read_whitelisted_registers(client[0].ctx, engine,
-						 client[0].scratch[0]);
-		if (err)
-			goto err;
-
-		/* Try to overwrite registers (should only affect ctx0) */
-		err = scrub_whitelisted_registers(client[0].ctx, engine);
-		if (err)
-			goto err;
-
-		/* Read values from ctx1; we expect these to be defaults */
-		err = read_whitelisted_registers(client[1].ctx, engine,
-						 client[1].scratch[0]);
-		if (err)
-			goto err;
-
-		/* Verify that both reads return the same default values */
-		err = check_whitelisted_registers(engine,
-						  client[0].scratch[0],
-						  client[1].scratch[0],
-						  result_eq);
-		if (err)
-			goto err;
-
-		/* Read back the updated values in ctx0 */
-		err = read_whitelisted_registers(client[0].ctx, engine,
-						 client[0].scratch[1]);
-		if (err)
-			goto err;
-
-		/* User should be granted privilege to overwrite regs */
-		err = check_whitelisted_registers(engine,
-						  client[0].scratch[0],
-						  client[0].scratch[1],
-						  result_neq);
-		if (err)
-			goto err;
-	}
-
-err:
-	for (i = 0; i < ARRAY_SIZE(client); i++) {
-		if (!client[i].ctx)
-			break;
-
-		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
-		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
-		kernel_context_close(client[i].ctx);
-	}
-
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
-
-	return err;
-}
-
-static bool verify_gt_engine_wa(struct drm_i915_private *i915,
-				struct wa_lists *lists, const char *str)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	bool ok = true;
-
-	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
-
-	for_each_engine(engine, i915, id) {
-		ok &= engine_wa_list_verify(engine,
-					    &lists->engine[id].wa_list,
-					    str) == 0;
-	}
-
-	return ok;
-}
-
-static int
-live_gpu_reset_gt_engine_workarounds(void *arg)
-{
-	struct drm_i915_private *i915 = arg;
-	intel_wakeref_t wakeref;
-	struct wa_lists lists;
-	bool ok;
-
-	if (!intel_has_gpu_reset(i915))
-		return 0;
-
-	pr_info("Verifying after GPU reset...\n");
-
-	igt_global_reset_lock(i915);
-	wakeref = intel_runtime_pm_get(i915);
-
-	reference_lists_init(i915, &lists);
-
-	ok = verify_gt_engine_wa(i915, &lists, "before reset");
-	if (!ok)
-		goto out;
-
-	i915_reset(i915, ALL_ENGINES, "live_workarounds");
-
-	ok = verify_gt_engine_wa(i915, &lists, "after reset");
-
-out:
-	reference_lists_fini(i915, &lists);
-	intel_runtime_pm_put(i915, wakeref);
-	igt_global_reset_unlock(i915);
-
-	return ok ?
0 : -ESRCH; -} - -static int -live_engine_reset_gt_engine_workarounds(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - struct igt_spinner spin; - enum intel_engine_id id; - struct i915_request *rq; - intel_wakeref_t wakeref; - struct wa_lists lists; - int ret = 0; - - if (!intel_has_reset_engine(i915)) - return 0; - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); - - reference_lists_init(i915, &lists); - - for_each_engine(engine, i915, id) { - bool ok; - - pr_info("Verifying after %s reset...\n", engine->name); - - ok = verify_gt_engine_wa(i915, &lists, "before reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - - i915_reset_engine(engine, "live_workarounds"); - - ok = verify_gt_engine_wa(i915, &lists, "after idle reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - - ret = igt_spinner_init(&spin, i915); - if (ret) - goto err; - - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - igt_spinner_fini(&spin); - goto err; - } - - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { - pr_err("Spinner failed to start\n"); - igt_spinner_fini(&spin); - ret = -ETIMEDOUT; - goto err; - } - - i915_reset_engine(engine, "live_workarounds"); - - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - - ok = verify_gt_engine_wa(i915, &lists, "after busy reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - } - -err: - reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); - igt_global_reset_unlock(i915); - kernel_context_close(ctx); - - igt_flush_test(i915, I915_WAIT_LOCKED); - - return ret; -} - -int intel_workarounds_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_dirty_whitelist), - SUBTEST(live_reset_whitelist), - SUBTEST(live_isolated_whitelist), - SUBTEST(live_gpu_reset_gt_engine_workarounds), - SUBTEST(live_engine_reset_gt_engine_workarounds), - }; - int err; - - if (i915_terminally_wedged(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c deleted file mode 100644 index 61a8206ed677..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "mock_engine.h" -#include "mock_request.h" - -struct mock_ring { - struct intel_ring base; - struct i915_timeline timeline; -}; - -static void mock_timeline_pin(struct i915_timeline *tl) -{ - tl->pin_count++; -} - -static void mock_timeline_unpin(struct i915_timeline *tl) -{ - GEM_BUG_ON(!tl->pin_count); - tl->pin_count--; -} - -static struct intel_ring *mock_ring(struct intel_engine_cs *engine) -{ - const unsigned long sz = PAGE_SIZE / 2; - struct mock_ring *ring; - - ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); - if (!ring) - return NULL; - - if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) { - kfree(ring); - return NULL; - } - - kref_init(&ring->base.ref); - ring->base.size = sz; - ring->base.effective_size = sz; - ring->base.vaddr = (void *)(ring + 1); - ring->base.timeline = &ring->timeline; - - INIT_LIST_HEAD(&ring->base.request_list); - intel_ring_update_space(&ring->base); - - return &ring->base; -} - -static void mock_ring_free(struct intel_ring *base) -{ - struct mock_ring *ring = container_of(base, typeof(*ring), base); - - i915_timeline_fini(&ring->timeline); - kfree(ring); -} - -static struct i915_request *first_request(struct mock_engine *engine) -{ - return list_first_entry_or_null(&engine->hw_queue, - struct i915_request, - mock.link); -} - -static void advance(struct i915_request *request) -{ - list_del_init(&request->mock.link); - i915_request_mark_complete(request); - GEM_BUG_ON(!i915_request_completed(request)); - - intel_engine_queue_breadcrumbs(request->engine); -} - -static void hw_delay_complete(struct timer_list *t) -{ - struct mock_engine *engine = from_timer(engine, t, hw_delay); - struct i915_request *request; - unsigned long flags; - - spin_lock_irqsave(&engine->hw_lock, flags); - - /* Timer fired, first request is complete */ - request = first_request(engine); - if (request) - advance(request); - - /* - * Also immediately signal any subsequent 0-delay requests, but - * requeue the timer for the next delayed request. 
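- * In effect, a burst of zero-delay requests retires in the same timer
- * tick as the delayed request that preceded them.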
- */ - while ((request = first_request(engine))) { - if (request->mock.delay) { - mod_timer(&engine->hw_delay, - jiffies + request->mock.delay); - break; - } - - advance(request); - } - - spin_unlock_irqrestore(&engine->hw_lock, flags); -} - -static void mock_context_unpin(struct intel_context *ce) -{ - mock_timeline_unpin(ce->ring->timeline); -} - -static void mock_context_destroy(struct kref *ref) -{ - struct intel_context *ce = container_of(ref, typeof(*ce), ref); - - GEM_BUG_ON(intel_context_is_pinned(ce)); - - if (ce->ring) - mock_ring_free(ce->ring); - - intel_context_free(ce); -} - -static int mock_context_pin(struct intel_context *ce) -{ - if (!ce->ring) { - ce->ring = mock_ring(ce->engine); - if (!ce->ring) - return -ENOMEM; - } - - mock_timeline_pin(ce->ring->timeline); - return 0; -} - -static const struct intel_context_ops mock_context_ops = { - .pin = mock_context_pin, - .unpin = mock_context_unpin, - - .destroy = mock_context_destroy, -}; - -static int mock_request_alloc(struct i915_request *request) -{ - INIT_LIST_HEAD(&request->mock.link); - request->mock.delay = 0; - - return 0; -} - -static int mock_emit_flush(struct i915_request *request, - unsigned int flags) -{ - return 0; -} - -static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) -{ - return cs; -} - -static void mock_submit_request(struct i915_request *request) -{ - struct mock_engine *engine = - container_of(request->engine, typeof(*engine), base); - unsigned long flags; - - i915_request_submit(request); - - spin_lock_irqsave(&engine->hw_lock, flags); - list_add_tail(&request->mock.link, &engine->hw_queue); - if (list_is_first(&request->mock.link, &engine->hw_queue)) { - if (request->mock.delay) - mod_timer(&engine->hw_delay, - jiffies + request->mock.delay); - else - advance(request); - } - spin_unlock_irqrestore(&engine->hw_lock, flags); -} - -static void mock_reset_prepare(struct intel_engine_cs *engine) -{ -} - -static void mock_reset(struct intel_engine_cs *engine, bool stalled) -{ - GEM_BUG_ON(stalled); -} - -static void mock_reset_finish(struct intel_engine_cs *engine) -{ -} - -static void mock_cancel_requests(struct intel_engine_cs *engine) -{ - struct i915_request *request; - unsigned long flags; - - spin_lock_irqsave(&engine->timeline.lock, flags); - - /* Mark all submitted requests as skipped. 
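- * Requests that have already been signaled must keep their original
- * fence status, hence the i915_request_signaled() check before
- * setting -EIO.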
*/ - list_for_each_entry(request, &engine->timeline.requests, sched.link) { - if (!i915_request_signaled(request)) - dma_fence_set_error(&request->fence, -EIO); - - i915_request_mark_complete(request); - } - - spin_unlock_irqrestore(&engine->timeline.lock, flags); -} - -struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, - const char *name, - int id) -{ - struct mock_engine *engine; - - GEM_BUG_ON(id >= I915_NUM_ENGINES); - - engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); - if (!engine) - return NULL; - - /* minimal engine setup for requests */ - engine->base.i915 = i915; - snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); - engine->base.id = id; - engine->base.mask = BIT(id); - engine->base.status_page.addr = (void *)(engine + 1); - - engine->base.cops = &mock_context_ops; - engine->base.request_alloc = mock_request_alloc; - engine->base.emit_flush = mock_emit_flush; - engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; - engine->base.submit_request = mock_submit_request; - - engine->base.reset.prepare = mock_reset_prepare; - engine->base.reset.reset = mock_reset; - engine->base.reset.finish = mock_reset_finish; - engine->base.cancel_requests = mock_cancel_requests; - - if (i915_timeline_init(i915, &engine->base.timeline, NULL)) - goto err_free; - i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); - - intel_engine_init_breadcrumbs(&engine->base); - - /* fake hw queue */ - spin_lock_init(&engine->hw_lock); - timer_setup(&engine->hw_delay, hw_delay_complete, 0); - INIT_LIST_HEAD(&engine->hw_queue); - - if (pin_context(i915->kernel_context, &engine->base, - &engine->base.kernel_context)) - goto err_breadcrumbs; - - return &engine->base; - -err_breadcrumbs: - intel_engine_fini_breadcrumbs(&engine->base); - i915_timeline_fini(&engine->base.timeline); -err_free: - kfree(engine); - return NULL; -} - -void mock_engine_flush(struct intel_engine_cs *engine) -{ - struct mock_engine *mock = - container_of(engine, typeof(*mock), base); - struct i915_request *request, *rn; - - del_timer_sync(&mock->hw_delay); - - spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) - advance(request); - spin_unlock_irq(&mock->hw_lock); -} - -void mock_engine_reset(struct intel_engine_cs *engine) -{ -} - -void mock_engine_free(struct intel_engine_cs *engine) -{ - struct mock_engine *mock = - container_of(engine, typeof(*mock), base); - struct intel_context *ce; - - GEM_BUG_ON(timer_pending(&mock->hw_delay)); - - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); - - intel_context_unpin(engine->kernel_context); - - intel_engine_fini_breadcrumbs(engine); - i915_timeline_fini(&engine->timeline); - - kfree(engine); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h deleted file mode 100644 index b9cc3a245f16..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_engine.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above 
copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_ENGINE_H__
-#define __MOCK_ENGINE_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-
-#include "../intel_ringbuffer.h"
-
-struct mock_engine {
-	struct intel_engine_cs base;
-
-	spinlock_t hw_lock;
-	struct list_head hw_queue;
-	struct timer_list hw_delay;
-};
-
-struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
-				    const char *name,
-				    int id);
-void mock_engine_flush(struct intel_engine_cs *engine);
-void mock_engine_reset(struct intel_engine_cs *engine);
-void mock_engine_free(struct intel_engine_cs *engine);
-
-#endif /* !__MOCK_ENGINE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 60bbf8b4df40..f444ee5add27 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,7 +25,8 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include "mock_engine.h"
+#include "gt/mock_engine.h"
+
 #include "mock_context.h"
 #include "mock_request.h"
 #include "mock_gem_device.h"
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index d1a7c9608712..f739ba63057f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -22,7 +22,8 @@
  *
  */
-#include "mock_engine.h"
+#include "gt/mock_engine.h"
+
 #include "mock_request.h"
 
 struct i915_request *
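
The mock engine moved above emulates command submission entirely in software: submitted requests land on a private hw_queue and are completed either immediately or by the hw_delay timer. A rough sketch of how the API declared in mock_engine.h is typically driven; mock_gem_device() is assumed from mock_gem_device.h, and the function name and the omitted device teardown are hypothetical:

static int __maybe_unused example_mock_engine(void)
{
	struct drm_i915_private *i915;
	struct intel_engine_cs *engine;

	i915 = mock_gem_device();	/* fake device; no hardware is touched */
	if (!i915)
		return -ENOMEM;

	engine = mock_engine(i915, "mock", 0);
	if (!engine)
		return -ENOMEM;

	/*
	 * Requests submitted here queue onto mock_engine.hw_queue; a
	 * request with a non-zero mock.delay completes when hw_delay
	 * fires, while mock_engine_flush() drains everything at once.
	 */
	mock_engine_flush(engine);

	mock_engine_free(engine);
	return 0;	/* device teardown omitted for brevity */
}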