Diffstat (limited to 'drivers/gpu/drm/xe/tests')
-rw-r--r--  drivers/gpu/drm/xe/tests/Makefile                       |   8
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_args_test.c                 | 221
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c                        | 341
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo_test.c                   |  26
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo_test.h                   |  14
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf.c                   |  87
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf_test.c              |  25
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf_test.h              |  13
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c  | 232
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c             | 334
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c           | 136
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_kunit_helpers.c             |  39
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_kunit_helpers.h             |   2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_live_test_mod.c             |  23
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c                   | 557
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate_test.c              |  25
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate_test.h              |  13
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs.c                      | 172
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.c                 |  26
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.h                 |  14
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci.c                       | 108
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci_test.c                  |   4
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci_test.h                  |   3
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_rtp_test.c                  | 279
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_test.h                      |  10
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_test_mod.c                  |   2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_wa_test.c                   |   2
27 files changed, 2216 insertions(+), 500 deletions(-)
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 9d1d88af8b2f..0e3408f4952c 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -1,15 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
# "live" kunit tests
-obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
- xe_bo_test.o \
- xe_dma_buf_test.o \
- xe_migrate_test.o \
- xe_mocs_test.o
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
+xe_live_test-y = xe_live_test_mod.o
# Normal kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
xe_test-y = xe_test_mod.o \
+ xe_args_test.o \
xe_pci_test.o \
xe_rtp_test.o \
xe_wa_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_args_test.c b/drivers/gpu/drm/xe/tests/xe_args_test.c
new file mode 100644
index 000000000000..f3fb23aa5d2e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_args_test.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_args.h"
+
+static void call_args_example(struct kunit *test)
+{
+#define foo X, Y, Z, Q
+#define bar COUNT_ARGS(foo)
+#define buz CALL_ARGS(COUNT_ARGS, foo)
+
+ KUNIT_EXPECT_EQ(test, bar, 1);
+ KUNIT_EXPECT_EQ(test, buz, 4);
+
+#undef foo
+#undef bar
+#undef buz
+}
+
+static void drop_first_arg_example(struct kunit *test)
+{
+#define foo X, Y, Z, Q
+#define bar CALL_ARGS(COUNT_ARGS, DROP_FIRST_ARG(foo))
+
+ KUNIT_EXPECT_EQ(test, bar, 3);
+
+#undef foo
+#undef bar
+}
+
+static void first_arg_example(struct kunit *test)
+{
+ int X = 1;
+
+#define foo X, Y, Z, Q
+#define bar FIRST_ARG(foo)
+
+ KUNIT_EXPECT_EQ(test, bar, X);
+ KUNIT_EXPECT_STREQ(test, __stringify(bar), "X");
+
+#undef foo
+#undef bar
+}
+
+static void last_arg_example(struct kunit *test)
+{
+ int Q = 1;
+
+#define foo X, Y, Z, Q
+#define bar LAST_ARG(foo)
+
+ KUNIT_EXPECT_EQ(test, bar, Q);
+ KUNIT_EXPECT_STREQ(test, __stringify(bar), "Q");
+
+#undef foo
+#undef bar
+}
+
+static void pick_arg_example(struct kunit *test)
+{
+ int Y = 1, Z = 2;
+
+#define foo X, Y, Z, Q
+#define bar PICK_ARG(2, foo)
+#define buz PICK_ARG3(foo)
+
+ KUNIT_EXPECT_EQ(test, bar, Y);
+ KUNIT_EXPECT_STREQ(test, __stringify(bar), "Y");
+ KUNIT_EXPECT_EQ(test, buz, Z);
+ KUNIT_EXPECT_STREQ(test, __stringify(buz), "Z");
+
+#undef foo
+#undef bar
+#undef buz
+}
+
+static void sep_comma_example(struct kunit *test)
+{
+#define foo(f) f(X) f(Y) f(Z) f(Q)
+#define bar DROP_FIRST_ARG(foo(ARGS_SEP_COMMA __stringify))
+#define buz CALL_ARGS(COUNT_ARGS, DROP_FIRST_ARG(foo(ARGS_SEP_COMMA)))
+
+ static const char * const a[] = { bar };
+
+ KUNIT_EXPECT_STREQ(test, a[0], "X");
+ KUNIT_EXPECT_STREQ(test, a[1], "Y");
+ KUNIT_EXPECT_STREQ(test, a[2], "Z");
+ KUNIT_EXPECT_STREQ(test, a[3], "Q");
+
+ KUNIT_EXPECT_EQ(test, buz, 4);
+
+#undef foo
+#undef bar
+#undef buz
+}
+
+#define NO_ARGS
+#define FOO_ARGS X, Y, Z, Q
+#define MAX_ARGS -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12
+
+static void count_args_test(struct kunit *test)
+{
+ int count;
+
+ /* COUNT_ARGS() counts to 12 */
+
+ count = COUNT_ARGS();
+ KUNIT_EXPECT_EQ(test, count, 0);
+
+ count = COUNT_ARGS(1);
+ KUNIT_EXPECT_EQ(test, count, 1);
+
+ count = COUNT_ARGS(a, b, c, d, e);
+ KUNIT_EXPECT_EQ(test, count, 5);
+
+ count = COUNT_ARGS(a, b, c, d, e, f, g, h, i, j, k, l);
+ KUNIT_EXPECT_EQ(test, count, 12);
+
+ /* COUNT_ARGS() does not expand params */
+
+ count = COUNT_ARGS(NO_ARGS);
+ KUNIT_EXPECT_EQ(test, count, 1);
+
+ count = COUNT_ARGS(FOO_ARGS);
+ KUNIT_EXPECT_EQ(test, count, 1);
+}
+
+static void call_args_test(struct kunit *test)
+{
+ int count;
+
+ count = CALL_ARGS(COUNT_ARGS, NO_ARGS);
+ KUNIT_EXPECT_EQ(test, count, 0);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, NO_ARGS), 0);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, FOO_ARGS), 4);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, FOO_ARGS, FOO_ARGS), 8);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, MAX_ARGS), 12);
+}
+
+static void drop_first_arg_test(struct kunit *test)
+{
+ int Y = -2, Z = -3, Q = -4;
+ int a[] = { DROP_FIRST_ARG(FOO_ARGS) };
+
+ KUNIT_EXPECT_EQ(test, DROP_FIRST_ARG(0, -1), -1);
+ KUNIT_EXPECT_EQ(test, DROP_FIRST_ARG(DROP_FIRST_ARG(0, -1, -2)), -2);
+
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, DROP_FIRST_ARG(FOO_ARGS)), 3);
+ KUNIT_EXPECT_EQ(test, DROP_FIRST_ARG(DROP_FIRST_ARG(DROP_FIRST_ARG(FOO_ARGS))), -4);
+ KUNIT_EXPECT_EQ(test, a[0], -2);
+ KUNIT_EXPECT_EQ(test, a[1], -3);
+ KUNIT_EXPECT_EQ(test, a[2], -4);
+
+#define foo DROP_FIRST_ARG(FOO_ARGS)
+#define bar DROP_FIRST_ARG(DROP_FIRST_ARG(FOO_ARGS))
+#define buz DROP_FIRST_ARG(DROP_FIRST_ARG(DROP_FIRST_ARG(FOO_ARGS)))
+
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, foo), 3);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, bar), 2);
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, buz), 1);
+ KUNIT_EXPECT_STREQ(test, __stringify(buz), "Q");
+
+#undef foo
+#undef bar
+#undef buz
+}
+
+static void first_arg_test(struct kunit *test)
+{
+ int X = -1;
+ int a[] = { FIRST_ARG(FOO_ARGS) };
+
+ KUNIT_EXPECT_EQ(test, FIRST_ARG(-1, -2), -1);
+
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, FIRST_ARG(FOO_ARGS)), 1);
+ KUNIT_EXPECT_EQ(test, FIRST_ARG(FOO_ARGS), -1);
+ KUNIT_EXPECT_EQ(test, a[0], -1);
+ KUNIT_EXPECT_STREQ(test, __stringify(FIRST_ARG(FOO_ARGS)), "X");
+}
+
+static void last_arg_test(struct kunit *test)
+{
+ int Q = -4;
+ int a[] = { LAST_ARG(FOO_ARGS) };
+
+ KUNIT_EXPECT_EQ(test, LAST_ARG(-1, -2), -2);
+
+ KUNIT_EXPECT_EQ(test, CALL_ARGS(COUNT_ARGS, LAST_ARG(FOO_ARGS)), 1);
+ KUNIT_EXPECT_EQ(test, LAST_ARG(FOO_ARGS), -4);
+ KUNIT_EXPECT_EQ(test, a[0], -4);
+ KUNIT_EXPECT_STREQ(test, __stringify(LAST_ARG(FOO_ARGS)), "Q");
+
+ KUNIT_EXPECT_EQ(test, LAST_ARG(MAX_ARGS), -12);
+ KUNIT_EXPECT_STREQ(test, __stringify(LAST_ARG(MAX_ARGS)), "-12");
+}
+
+static struct kunit_case args_tests[] = {
+ KUNIT_CASE(count_args_test),
+ KUNIT_CASE(call_args_example),
+ KUNIT_CASE(call_args_test),
+ KUNIT_CASE(drop_first_arg_example),
+ KUNIT_CASE(drop_first_arg_test),
+ KUNIT_CASE(first_arg_example),
+ KUNIT_CASE(first_arg_test),
+ KUNIT_CASE(last_arg_example),
+ KUNIT_CASE(last_arg_test),
+ KUNIT_CASE(pick_arg_example),
+ KUNIT_CASE(sep_comma_example),
+ {}
+};
+
+static struct kunit_suite args_test_suite = {
+ .name = "args",
+ .test_cases = args_tests,
+};
+
+kunit_test_suite(args_test_suite);
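
For reference, the helpers exercised above rely on a classic double-expansion
trick. A minimal sketch of plausible definitions, modeled on
include/linux/args.h and the xe_args.h header this test includes (the real
macros may differ in detail):

/* Counts up to 12 arguments; an unexpanded macro name counts as one. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/*
 * CALL_ARGS() adds one extra round of macro expansion, so an argument that
 * is itself a macro (e.g. FOO_ARGS) is expanded into its comma-separated
 * list before f() consumes it; this is why COUNT_ARGS(FOO_ARGS) == 1 while
 * CALL_ARGS(COUNT_ARGS, FOO_ARGS) == 4 in the tests above.
 */
#define __CALL_ARGS(f, args...) f(args)
#define CALL_ARGS(f, args...) __CALL_ARGS(f, args)

/* The remaining helpers follow the same two-step pattern: */
#define __DROP_FIRST_ARG(x, args...) args
#define DROP_FIRST_ARG(args...) __DROP_FIRST_ARG(args)
#define __FIRST_ARG(x, args...) x
#define FIRST_ARG(args...) __FIRST_ARG(args)

/* Expands to a bare comma, turning f(X) f(Y) sequences into argument lists. */
#define ARGS_SEP_COMMA ,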
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3436fd9cf2b2..378dcd0fb414 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,7 +6,14 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_bo_test.h"
+#include <linux/iosys-map.h>
+#include <linux/math64.h>
+#include <linux/prandom.h>
+#include <linux/swap.h>
+
+#include <uapi/linux/sysinfo.h>
+
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -36,16 +43,24 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
/* Optionally clear bo *and* CCS data in VRAM. */
if (clear) {
- fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource);
+ fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (IS_ERR(fence)) {
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
}
+
+ if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
+ dma_fence_put(fence);
+ KUNIT_FAIL(test, "Timeout while clearing bo.\n");
+ return -ETIME;
+ }
+
dma_fence_put(fence);
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -116,7 +131,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
int ret;
/* TODO: Sanity check */
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -124,7 +139,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Testing system memory\n");
bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -154,16 +169,22 @@ out_unlock:
static int ccs_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!xe_device_has_flat_ccs(xe)) {
- kunit_info(test, "Skipping non-flat-ccs device.\n");
+ kunit_skip(test, "non-flat-ccs device\n");
return 0;
}
- xe_device_mem_access_get(xe);
+ /* For xe2+ dgfx, we don't handle ccs metadata */
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
+ kunit_skip(test, "xe2+ dgfx device\n");
+ return 0;
+ }
+
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id) {
/* For igfx run only for primary tile */
@@ -172,21 +193,22 @@ static int ccs_test_run_device(struct xe_device *xe)
ccs_test_run_tile(xe, tile, test);
}
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
return 0;
}
-void xe_ccs_migrate_kunit(struct kunit *test)
+static void xe_ccs_migrate_kunit(struct kunit *test)
{
- xe_call_for_each_device(ccs_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ ccs_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
struct xe_bo *bo, *external;
- unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
struct xe_gt *__gt;
int err, i, id;
@@ -198,7 +220,6 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
xe_vm_lock(vm, false);
bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device,
bo_flags);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
@@ -208,7 +229,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
@@ -231,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -243,17 +264,16 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
@@ -325,28 +345,291 @@ cleanup_bo:
static int evict_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!IS_DGFX(xe)) {
- kunit_info(test, "Skipping non-discrete device %s.\n",
- dev_name(xe->drm.dev));
+ kunit_skip(test, "non-discrete device\n");
return 0;
}
- xe_device_mem_access_get(xe);
+ xe_pm_runtime_get(xe);
for_each_tile(tile, xe, id)
evict_test_run_tile(xe, tile, test);
- xe_device_mem_access_put(xe);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static void xe_bo_evict_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ evict_test_run_device(xe);
+}
+
+struct xe_bo_link {
+ struct list_head link;
+ struct xe_bo *bo;
+ u32 val;
+};
+
+#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M)
+
+static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t __maybe_unused i;
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ iosys_map_wr(&map, i, u32, val);
+ if (i == 0)
+ link->val = val;
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+ return 0;
+}
+
+static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo,
+ unsigned int bo_nr, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t i;
+ bool failed = false;
+
+ if (ret) {
+ KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
+ return true;
+ }
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ if (iosys_map_rd(&map, i, u32) != val) {
+ KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
+ bo_nr, (unsigned long long)i);
+ kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
+ (unsigned int)iosys_map_rd(&map, i, u32), val);
+ if (i == 0 && val != link->val)
+ kunit_info(test, "Looks like PRNG is out of sync.\n");
+ failed = true;
+ break;
+ }
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+
+ return failed;
+}
+
+/*
+ * Try to create system bos corresponding to twice the amount
+ * of available system memory to test shrinker functionality.
+ * If no swap space is available to accommodate the
+ * memory overcommit, mark bos purgeable.
+ */
+static int shrink_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ LIST_HEAD(bos);
+ struct xe_bo_link *link, *next;
+ struct sysinfo si;
+ u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit;
+ unsigned int interrupted = 0, successful = 0, count = 0;
+ struct rnd_state prng;
+ u64 rand_seed;
+ bool failed = false;
+
+ rand_seed = get_random_u64();
+ prandom_seed_state(&prng, rand_seed);
+ kunit_info(test, "Random seed is 0x%016llx.\n",
+ (unsigned long long)rand_seed);
+
+ /* Skip if execution time is expected to be too long. */
+
+ limit = SZ_32G;
+ /* IGFX with flat CCS needs to copy when swapping / shrinking */
+ if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+ limit = SZ_16G;
+
+ si_meminfo(&si);
+ ram = (size_t)si.freeram * si.mem_unit;
+ if (ram > limit) {
+ kunit_skip(test, "Too long expected execution time.\n");
+ return 0;
+ }
+ to_alloc = ram * 2;
+
+ ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
+ if (to_alloc > ram_and_swap)
+ purgeable = to_alloc - ram_and_swap;
+ purgeable += div64_u64(purgeable, 5);
+
+ kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n",
+ (unsigned long)ram);
+ for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) {
+ struct xe_bo *bo;
+ unsigned int mem_type;
+ struct xe_ttm_tt *xe_tt;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ KUNIT_FAIL(test, "Unexpected link allocation failure\n");
+ failed = true;
+ break;
+ }
+
+ INIT_LIST_HEAD(&link->link);
+
+ /* We can create bos using WC caching here. But it is slower. */
+ bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ DRM_XE_GEM_CPU_CACHING_WB,
+ XE_BO_FLAG_SYSTEM);
+ if (IS_ERR(bo)) {
+ if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
+ bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
+ KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
+ kfree(link);
+ failed = true;
+ break;
+ }
+ xe_bo_lock(bo, false);
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+
+ /*
+ * Allocate purgeable bos first, because if we do it the
+ * other way around, they may not be subject to swapping...
+ */
+ if (alloced < purgeable) {
+ xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_tt->purgeable = true;
+ xe_ttm_tt_account_add(&xe_tt->ttm);
+ bo->ttm.priority = 0;
+ spin_lock(&bo->ttm.bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->ttm);
+ spin_unlock(&bo->ttm.bdev->lru_lock);
+ } else {
+ int ret = shrink_test_fill_random(bo, &prng, link);
+
+ if (ret) {
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
+ ERR_PTR(ret));
+ kfree(link);
+ failed = true;
+ break;
+ }
+ }
+
+ mem_type = bo->ttm.resource->mem_type;
+ xe_bo_unlock(bo);
+ link->bo = bo;
+ list_add_tail(&link->link, &bos);
+
+ if (mem_type != XE_PL_TT) {
+ KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n",
+ bo->ttm.resource->mem_type);
+ failed = true;
+ }
+ cond_resched();
+ if (signal_pending(current))
+ break;
+ }
+
+ /*
+ * Read back and destroy bos. Reset the pseudo-random seed to get an
+ * identical pseudo-random number sequence for readback.
+ */
+ prandom_seed_state(&prng, rand_seed);
+ list_for_each_entry_safe(link, next, &bos, link) {
+ static struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct xe_bo *bo = link->bo;
+ struct xe_ttm_tt *xe_tt;
+ int ret;
+
+ count++;
+ if (!signal_pending(current) && !failed) {
+ bool purgeable, intr = false;
+
+ xe_bo_lock(bo, false);
+
+ /* xe_tt->purgeable is cleared on validate. */
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+ purgeable = xe_tt->purgeable;
+ do {
+ ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx);
+ if (ret == -EINTR)
+ intr = true;
+ } while (ret == -EINTR && !signal_pending(current));
+ if (!ret && !purgeable)
+ failed = shrink_test_verify(test, bo, count, &prng, link);
+
+ xe_bo_unlock(bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Validation failed: %pe\n",
+ ERR_PTR(ret));
+ failed = true;
+ } else if (intr) {
+ interrupted++;
+ } else {
+ successful++;
+ }
+ }
+ xe_bo_put(link->bo);
+ list_del(&link->link);
+ kfree(link);
+ }
+ kunit_info(test, "Readbacks interrupted: %u successful: %u\n",
+ interrupted, successful);
return 0;
}
-void xe_bo_evict_kunit(struct kunit *test)
+static void xe_bo_shrink_kunit(struct kunit *test)
{
- xe_call_for_each_device(evict_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ shrink_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit);
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
+
+static struct kunit_case xe_bo_shrink_test[] = {
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_shrink_test_suite = {
+ .name = "xe_bo_shrink",
+ .test_cases = xe_bo_shrink_test,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
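
To make the sizing logic in shrink_test_run_device() above concrete, here is
a worked example with illustrative numbers (the figures are hypothetical, not
taken from the patch):

	ram          = 8 GiB of free system memory
	to_alloc     = 2 * ram                   = 16 GiB
	ram_and_swap = ram + 4 GiB of swap       = 12 GiB
	purgeable    = to_alloc - ram_and_swap   =  4 GiB
	purgeable   += purgeable / 5             (20% margin, ~4.8 GiB)

So roughly the first 4.8 GiB worth of 64 MiB bos (XE_BO_SHRINK_SIZE) get
marked purgeable, and the shrinker can simply drop their pages instead of
swapping them once the overcommit exhausts RAM plus swap.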
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
deleted file mode 100644
index f408f17f2164..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_bo_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_bo_tests[] = {
- KUNIT_CASE(xe_ccs_migrate_kunit),
- KUNIT_CASE(xe_bo_evict_kunit),
- {}
-};
-
-static struct kunit_suite xe_bo_test_suite = {
- .name = "xe_bo",
- .test_cases = xe_bo_tests,
-};
-
-kunit_test_suite(xe_bo_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_bo kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h
deleted file mode 100644
index 0113ab45066a..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_BO_TEST_H_
-#define _XE_BO_TEST_H_
-
-struct kunit;
-
-void xe_ccs_migrate_kunit(struct kunit *test);
-void xe_bo_evict_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 9f6d571d7fa9..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -3,15 +3,16 @@
* Copyright © 2022 Intel Corporation
*/
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_dma_buf_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool p2p_enabled(struct dma_buf_test_params *params)
{
@@ -36,14 +37,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
xe_bo_assert_held(imported);
mem_type = XE_PL_VRAM0;
- if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
/* No VRAM allowed */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !p2p_enabled(params))
/* No P2P */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ (params->mem_mask & XE_BO_FLAG_SYSTEM))
/* Pin migrated to TT */
mem_type = XE_PL_TT;
@@ -64,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
@@ -93,7 +94,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* possible, saving a migration step as the transfer is likely
* just as fast from system memory.
*/
- if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+ if (params->mem_mask & XE_BO_FLAG_SYSTEM)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
else
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -106,7 +107,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -115,17 +116,17 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* No VRAM on this device? */
if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
- (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ (params->mem_mask & XE_BO_FLAG_VRAM0))
return;
size = PAGE_SIZE;
- if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+ if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
+ params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -148,7 +149,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
*/
if (params->force_different_devices &&
!p2p_enabled(params) &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
@@ -161,7 +162,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
params->force_different_devices &&
- !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ !(params->mem_mask & XE_BO_FLAG_SYSTEM))
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Otherwise only expect interrupts or success. */
else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +181,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(import));
} else if (!params->force_different_devices ||
p2p_enabled(params) ||
- (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
/* Shouldn't fail if we can reuse same bo, use p2p or use system */
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
@@ -203,52 +204,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
* gem object.
*/
static const struct dma_buf_test_params test_params[] = {
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_VRAM0,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM},
+ {.mem_mask = XE_BO_FLAG_SYSTEM,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
- {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.force_different_devices = true},
{}
@@ -257,8 +258,9 @@ static const struct dma_buf_test_params test_params[] = {
static int dma_buf_run_device(struct xe_device *xe)
{
const struct dma_buf_test_params *params;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
+ xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
struct dma_buf_test_params p = *params;
@@ -266,13 +268,28 @@ static int dma_buf_run_device(struct xe_device *xe)
test->priv = &p;
xe_test_dmabuf_import_same_driver(xe);
}
+ xe_pm_runtime_put(xe);
/* A non-zero return would halt iteration over driver devices */
return 0;
}
-void xe_dma_buf_kunit(struct kunit *test)
+static void xe_dma_buf_kunit(struct kunit *test)
{
- xe_call_for_each_device(dma_buf_run_device);
+ struct xe_device *xe = test->priv;
+
+ dma_buf_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
deleted file mode 100644
index 9f5a9cda8c0f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_dma_buf_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_dma_buf_tests[] = {
- KUNIT_CASE(xe_dma_buf_kunit),
- {}
-};
-
-static struct kunit_suite xe_dma_buf_test_suite = {
- .name = "xe_dma_buf",
- .test_cases = xe_dma_buf_tests,
-};
-
-kunit_test_suite(xe_dma_buf_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_dma_buf kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
deleted file mode 100644
index e6b464ddd526..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_DMA_BUF_TEST_H_
-#define _XE_DMA_BUF_TEST_H_
-
-struct kunit;
-
-void xe_dma_buf_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
new file mode 100644
index 000000000000..b683585db852
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+static int pf_service_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+ struct xe_gt *gt;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ gt = xe_device_get_gt(xe, 0);
+ pf_init_versions(gt);
+
+ /*
+ * sanity check:
+ * - all supported platforms' VF/PF ABI versions must be defined
+ * - base version can't be newer than latest
+ */
+ KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.latest.major);
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
+ gt->sriov.pf.service.version.latest.minor);
+
+ test->priv = gt;
+ return 0;
+}
+
+static void pf_negotiate_any(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY,
+ VF2PF_HANDSHAKE_MINOR_ANY,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_base_match(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor);
+}
+
+static void pf_negotiate_base_newer(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor);
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_next(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
+ KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major);
+ if (major == gt->sriov.pf.service.version.latest.major)
+ KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor);
+ else
+ KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n");
+}
+
+static void pf_negotiate_base_older(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (!gt->sriov.pf.service.version.base.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major,
+ gt->sriov.pf.service.version.base.minor - 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_base_prev(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_NE(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.base.major - 1, 1,
+ &major, &minor));
+}
+
+static void pf_negotiate_latest_match(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_newer(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_next(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major + 1, 0,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor);
+}
+
+static void pf_negotiate_latest_older(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (!gt->sriov.pf.service.version.latest.minor)
+ kunit_skip(test, "no older minor\n");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major,
+ gt->sriov.pf.service.version.latest.minor - 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major);
+ KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1);
+}
+
+static void pf_negotiate_latest_prev(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ u32 major, minor;
+
+ if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
+ kunit_skip(test, "no prev major");
+
+ KUNIT_ASSERT_EQ(test, 0,
+ pf_negotiate_version(gt,
+ gt->sriov.pf.service.version.latest.major - 1,
+ gt->sriov.pf.service.version.base.minor + 1,
+ &major, &minor));
+ KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1);
+ KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major);
+}
+
+static struct kunit_case pf_service_test_cases[] = {
+ KUNIT_CASE(pf_negotiate_any),
+ KUNIT_CASE(pf_negotiate_base_match),
+ KUNIT_CASE(pf_negotiate_base_newer),
+ KUNIT_CASE(pf_negotiate_base_next),
+ KUNIT_CASE(pf_negotiate_base_older),
+ KUNIT_CASE(pf_negotiate_base_prev),
+ KUNIT_CASE(pf_negotiate_latest_match),
+ KUNIT_CASE(pf_negotiate_latest_newer),
+ KUNIT_CASE(pf_negotiate_latest_next),
+ KUNIT_CASE(pf_negotiate_latest_older),
+ KUNIT_CASE(pf_negotiate_latest_prev),
+ {}
+};
+
+static struct kunit_suite pf_service_suite = {
+ .name = "pf_service",
+ .test_cases = pf_service_test_cases,
+ .init = pf_service_test_init,
+};
+
+kunit_test_suite(pf_service_suite);
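
Taken together, the cases above pin down the negotiation contract of
pf_negotiate_version() (a summary inferred from the test expectations, not
quoted from the implementation):

	/* pf_negotiate_version(gt, wanted_major, wanted_minor, &major, &minor) */
	(ANY, ANY)                   -> 0, negotiates the latest version
	exact base or latest match   -> 0, echoes the request back
	newer than latest            -> 0, clamped down to latest
	between base and latest      -> 0, negotiated within [base, latest]
	older than base              -> non-zero error, nothing negotiated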
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
new file mode 100644
index 000000000000..6faffcd74869
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_guc_ct.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define DUT_GGTT_START SZ_1M
+#define DUT_GGTT_SIZE SZ_2M
+
+static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
+ struct xe_tile *tile,
+ size_t size, u32 flags)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_bo *bo;
+ void *buf;
+
+ bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+
+ buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ bo->tile = tile;
+ bo->ttm.bdev = &xe->ttm;
+ bo->size = size;
+ iosys_map_set_vaddr(&bo->vmap, buf);
+
+ if (flags & XE_BO_FLAG_GGTT) {
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
+
+ KUNIT_ASSERT_EQ(test, 0,
+ drm_mm_insert_node_in_range(&ggtt->mm,
+ &bo->ggtt_node[tile->id]->base,
+ bo->size, SZ_4K,
+ 0, 0, U64_MAX, 0));
+ }
+
+ return bo;
+}
+
+static int guc_buf_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_ggtt *ggtt;
+ struct xe_guc *guc;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
+ guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
+
+ drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
+ mutex_init(&ggtt->lock);
+
+ kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
+ replacement_xe_managed_bo_create_pin_map);
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));
+
+ test->priv = &guc->buf;
+ return 0;
+}
+
+static void test_smallest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, 1);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_largest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_granular(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_unique(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, m, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++) {
+ for (m = n + 1; m < dwords; m++) {
+ KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
+ xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
+ KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
+ xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
+ }
+ }
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_overlap(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
+ u32 bytes = dwords * sizeof(u32);
+ void *p1, *p2;
+ u64 a1, a2;
+
+ b1 = xe_guc_buf_reserve(cache, dwords);
+ b2 = xe_guc_buf_reserve(cache, dwords);
+
+ p1 = xe_guc_buf_cpu_ptr(b1);
+ p2 = xe_guc_buf_cpu_ptr(b2);
+
+ a1 = xe_guc_buf_gpu_addr(b1);
+ a2 = xe_guc_buf_gpu_addr(b2);
+
+ KUNIT_EXPECT_PTR_NE(test, p1, p2);
+ if (p1 < p2)
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
+ else
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);
+
+ KUNIT_EXPECT_NE(test, a1, a2);
+ if (a1 < a2)
+ KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
+ else
+ KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);
+
+ xe_guc_buf_release(b1);
+ xe_guc_buf_release(b2);
+}
+
+static void test_reusable(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ void *p1;
+ u64 a1;
+
+ b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
+ KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
+ KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
+ xe_guc_buf_release(b1);
+
+ b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
+ KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
+ xe_guc_buf_release(b2);
+}
+
+static void test_too_big(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
+ KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
+ xe_guc_buf_release(buf); /* shouldn't crash */
+}
+
+static void test_flush(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ const u32 dwords = xe_guc_buf_cache_dwords(cache);
+ const u32 bytes = dwords * sizeof(u32);
+ u32 *s, *p, *d;
+ int n;
+
+ KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+ KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+
+ for (n = 0; n < dwords; n++)
+ s[n] = n;
+
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_PTR_NE(test, p, s);
+ KUNIT_EXPECT_PTR_NE(test, p, d);
+
+ memcpy(p, s, bytes);
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));
+
+ iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
+ KUNIT_EXPECT_MEMEQ(test, s, d, bytes);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_lookup(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ u32 dwords;
+ u64 addr;
+ u32 *p;
+ int n;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));
+
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
+ addr + n * sizeof(u32), "n=%d", n);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_data(struct kunit *test)
+{
+ static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ void *p;
+
+ buf = xe_guc_buf_from_data(cache, data, sizeof(data));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_class(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ u32 dwords = xe_guc_buf_cache_dwords(cache);
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+}
+
+static struct kunit_case guc_buf_test_cases[] = {
+ KUNIT_CASE(test_smallest),
+ KUNIT_CASE(test_largest),
+ KUNIT_CASE(test_granular),
+ KUNIT_CASE(test_unique),
+ KUNIT_CASE(test_overlap),
+ KUNIT_CASE(test_reusable),
+ KUNIT_CASE(test_too_big),
+ KUNIT_CASE(test_flush),
+ KUNIT_CASE(test_lookup),
+ KUNIT_CASE(test_data),
+ KUNIT_CASE(test_class),
+ {}
+};
+
+static struct kunit_suite guc_buf_suite = {
+ .name = "guc_buf",
+ .test_cases = guc_buf_test_cases,
+ .init = guc_buf_test_init,
+};
+
+kunit_test_suites(&guc_buf_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
new file mode 100644
index 000000000000..ee30a1939eb0
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_id_mgr_test.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_kunit_helpers.h"
+
+static int guc_id_mgr_test_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm;
+
+ xe_kunit_helper_xe_device_test_init(test);
+ idm = &xe_device_get_gt(test->priv, 0)->uc.guc.submission_state.idm;
+
+ mutex_init(idm_mutex(idm));
+ test->priv = idm;
+ return 0;
+}
+
+static void bad_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, xe_guc_id_mgr_init(idm, 0));
+ KUNIT_EXPECT_EQ(test, -ERANGE, xe_guc_id_mgr_init(idm, GUC_ID_MAX + 1));
+}
+
+static void no_init(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ mutex_lock(idm_mutex(idm));
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve_locked(idm, 0));
+ mutex_unlock(idm_mutex(idm));
+
+ KUNIT_EXPECT_EQ(test, -ENODATA, xe_guc_id_mgr_reserve(idm, 1, 1));
+}
+
+static void init_fini(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+ KUNIT_EXPECT_NOT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, GUC_ID_MAX);
+ __fini_idm(NULL, idm);
+ KUNIT_EXPECT_NULL(test, idm->bitmap);
+ KUNIT_EXPECT_EQ(test, idm->total, 0);
+}
+
+static void check_used(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm->used, n);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, idm->used, n + 1);
+ }
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->used);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_quota(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, 2));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total - 1; n++) {
+ kunit_info(test, "n=%u", n);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, 1, idm->total - n), -EDQUOT);
+ KUNIT_EXPECT_EQ(test, idm_reserve_chunk_locked(idm, idm->total - n, 1), -EDQUOT);
+ KUNIT_EXPECT_GE(test, idm_reserve_chunk_locked(idm, 1, 1), 0);
+ }
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ idm_release_chunk_locked(idm, 0, idm->total);
+ KUNIT_EXPECT_EQ(test, idm->used, 0);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static void check_all(struct kunit *test)
+{
+ struct xe_guc_id_mgr *idm = test->priv;
+ unsigned int n;
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_id_mgr_init(idm, -1));
+
+ mutex_lock(idm_mutex(idm));
+
+ for (n = 0; n < idm->total; n++)
+ KUNIT_EXPECT_LE(test, 0, idm_reserve_chunk_locked(idm, 1, 0));
+ KUNIT_EXPECT_EQ(test, idm->used, idm->total);
+ for (n = 0; n < idm->total; n++)
+ idm_release_chunk_locked(idm, n, 1);
+
+ mutex_unlock(idm_mutex(idm));
+}
+
+static struct kunit_case guc_id_mgr_test_cases[] = {
+ KUNIT_CASE(bad_init),
+ KUNIT_CASE(no_init),
+ KUNIT_CASE(init_fini),
+ KUNIT_CASE(check_used),
+ KUNIT_CASE(check_quota),
+ KUNIT_CASE_SLOW(check_all),
+ {}
+};
+
+static struct kunit_suite guc_id_mgr_suite = {
+ .name = "guc_idm",
+ .test_cases = guc_id_mgr_test_cases,
+
+ .init = guc_id_mgr_test_init,
+ .exit = NULL,
+};
+
+kunit_test_suites(&guc_id_mgr_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
index fefe79b3b75a..bc5156966ce9 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -12,7 +12,9 @@
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
+#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_pm.h"
/**
* xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
@@ -88,3 +90,40 @@ int xe_kunit_helper_xe_device_test_init(struct kunit *test)
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
+
+KUNIT_DEFINE_ACTION_WRAPPER(put_xe_pm_runtime, xe_pm_runtime_put, struct xe_device *);
+
+/**
+ * xe_kunit_helper_xe_device_live_test_init - Prepare a &xe_device for
+ * use in a live KUnit test.
+ * @test: the &kunit where live &xe_device will be used
+ *
+ * This function expects a pointer to the &xe_device in &test.param_value,
+ * as prepared by &xe_pci_live_device_gen_param, and stores that pointer
+ * as &kunit.priv to allow the test code to access it.
+ *
+ * This function makes sure that the device is not wedged, then resumes it
+ * to avoid waking up the device inside the test. It uses a deferred cleanup
+ * action to release the runtime_pm reference.
+ *
+ * This function can be used as custom implementation of &kunit_suite.init.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test)
+{
+ struct xe_device *xe = xe_device_const_cast(test->param_value);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ kunit_info(test, "running on %s device\n", xe->info.platform_name);
+
+ KUNIT_ASSERT_FALSE(test, xe_device_wedged(xe));
+ xe_pm_runtime_get(xe);
+ KUNIT_ASSERT_EQ(test, 0, kunit_add_action_or_reset(test, put_xe_pm_runtime, xe));
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_live_test_init);
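
For reference, a suite consumes this helper by installing it as
&kunit_suite.init and parameterizing its cases over the available devices,
exactly as the xe_bo and xe_dma_buf suites in this series do. A minimal
hypothetical example (the foo_* names are made up):

static void foo_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;	/* set by the init helper */

	/* ... exercise xe on real hardware ... */
	kunit_info(test, "device %s\n", xe->info.platform_name);
}

static struct kunit_case foo_tests[] = {
	KUNIT_CASE_PARAM(foo_kunit, xe_pci_live_device_gen_param),
	{}
};

struct kunit_suite foo_live_test_suite = {
	.name = "foo_live",
	.test_cases = foo_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};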
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
index 067a1babf049..83665f7b1254 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -14,4 +14,6 @@ struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
struct device *dev);
int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
new file mode 100644
index 000000000000..81277c77016d
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/module.h>
+#include <kunit/test.h>
+
+extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_bo_shrink_test_suite;
+extern struct kunit_suite xe_dma_buf_test_suite;
+extern struct kunit_suite xe_migrate_test_suite;
+extern struct kunit_suite xe_mocs_test_suite;
+
+kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_bo_shrink_test_suite);
+kunit_test_suite(xe_dma_buf_test_suite);
+kunit_test_suite(xe_migrate_test_suite);
+kunit_test_suite(xe_mocs_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe live kunit tests");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index c347e2c29f81..4a65e3103f77 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -6,10 +6,11 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_migrate_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
+#include "xe_pm.h"
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
const char *str, struct kunit *test)
@@ -61,36 +62,6 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
return 0;
}
-static void
-sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
- struct xe_tile *tile, struct iosys_map *map, void *dst,
- u32 qword_ofs, u32 num_qwords,
- const struct xe_vm_pgtable_update *update)
-{
- struct migrate_test_params *p =
- to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));
- int i;
- u64 *ptr = dst;
- u64 value;
-
- for (i = 0; i < num_qwords; i++) {
- value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
- if (map)
- xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
- sizeof(u64), u64, value);
- else
- ptr[i] = value;
- }
-
- kunit_info(xe_cur_kunit(), "Used %s.\n", map ? "CPU" : "GPU");
- if (p->force_gpu && map)
- KUNIT_FAIL(xe_cur_kunit(), "GPU pagetable update used CPU.\n");
-}
-
-static const struct xe_migrate_pt_update_ops sanity_ops = {
- .populate = sanity_populate_cb,
-};
-
#define check(_retval, _expected, str, _test) \
do { if ((_retval) != (_expected)) { \
KUNIT_FAIL(_test, "Sanity check failed: " str \
@@ -112,7 +83,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -134,7 +106,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
}
xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- fence = xe_migrate_clear(m, remote, remote->ttm.resource);
+ fence = xe_migrate_clear(m, remote, remote->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
"Clearing remote small bo", test)) {
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
@@ -190,7 +163,7 @@ out_unlock:
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -202,63 +175,12 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
return;
if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
- region = XE_BO_CREATE_VRAM1_BIT;
+ region = XE_BO_FLAG_VRAM1;
else
- region = XE_BO_CREATE_VRAM0_BIT;
+ region = XE_BO_FLAG_VRAM0;
test_copy(m, bo, test, region);
}
-static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
- struct kunit *test, bool force_gpu)
-{
- struct xe_device *xe = tile_to_xe(m->tile);
- struct dma_fence *fence;
- u64 retval, expected;
- ktime_t then, now;
- int i;
-
- struct xe_vm_pgtable_update update = {
- .ofs = 1,
- .qwords = 0x10,
- .pt_bo = pt,
- };
- struct xe_migrate_pt_update pt_update = {
- .ops = &sanity_ops,
- };
- struct migrate_test_params p = {
- .base.id = XE_TEST_LIVE_MIGRATE,
- .force_gpu = force_gpu,
- };
-
- test->priv = &p;
- /* Test xe_migrate_update_pgtables() updates the pagetable as expected */
- expected = 0xf0f0f0f0f0f0f0f0ULL;
- xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
-
- then = ktime_get();
- fence = xe_migrate_update_pgtables(m, m->q->vm, NULL, m->q, &update, 1,
- NULL, 0, &pt_update);
- now = ktime_get();
- if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
- return;
-
- kunit_info(test, "Updating without syncing took %llu us,\n",
- (unsigned long long)ktime_to_us(ktime_sub(now, then)));
-
- dma_fence_put(fence);
- retval = xe_map_rd(xe, &pt->vmap, 0, u64);
- check(retval, expected, "PTE[0] must stay untouched", test);
-
- for (i = 0; i < update.qwords; i++) {
- retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
- check(retval, i * 0x1111111111111111ULL, "PTE update", test);
- }
-
- retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
- u64);
- check(retval, expected, "PTE[0x11] must stay untouched", test);
-}
-
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
struct xe_tile *tile = m->tile;
@@ -280,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -289,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -300,11 +220,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_CREATE_VRAM_IF_DGFX(tile) |
- XE_BO_CREATE_PINNED_BIT);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
- KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
- PTR_ERR(pt));
+ KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
+ PTR_ERR(tiny));
goto free_pt;
}
@@ -359,7 +278,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing small buffer object\n");
xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
expected = 0;
- fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
+ fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
goto out;
@@ -380,7 +300,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing big buffer object\n");
xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
expected = 0;
- fence = xe_migrate_clear(m, big, big->ttm.resource);
+ fence = xe_migrate_clear(m, big, big->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
goto out;
@@ -397,11 +318,6 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
test_copy_vram(m, big, test);
}
- kunit_info(test, "Testing page table update using CPU if GPU idle.\n");
- test_pt_update(m, pt, test, false);
- kunit_info(test, "Testing page table update using GPU\n");
- test_pt_update(m, pt, test, true);
-
out:
xe_bb_free(bb, NULL);
free_tiny:
@@ -419,26 +335,449 @@ vunmap:
static int migrate_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
kunit_info(test, "Testing tile id %d.\n", id);
- xe_vm_lock(m->q->vm, true);
- xe_device_mem_access_get(xe);
+ xe_vm_lock(m->q->vm, false);
xe_migrate_sanity_test(m, test);
- xe_device_mem_access_put(xe);
xe_vm_unlock(m->q->vm);
}
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static void xe_migrate_sanity_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ migrate_test_run_device(xe);
+}
+
+static struct dma_fence *blt_copy(struct xe_tile *tile,
+ struct xe_bo *src_bo, struct xe_bo *dst_bo,
+ bool copy_only_ccs, const char *str, struct kunit *test)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u64 size = src_bo->size;
+ struct xe_res_cursor src_it, dst_it;
+ struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u32 src_L0_pt, dst_L0_pt;
+ u64 src_L0, dst_L0;
+ int err;
+ bool src_is_vram = mem_type_is_vram(src->mem_type);
+ bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+
+ if (!src_is_vram)
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+ else
+ xe_res_first(src, 0, size, &src_it);
+
+ if (!dst_is_vram)
+ xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
+ else
+ xe_res_first(dst, 0, size, &dst_it);
+
+ while (size) {
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 flush_flags = 0;
+ u32 update_idx;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+ u32 pte_flags;
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
+
+ src_L0 = min(src_L0, dst_L0);
+
+ pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt, 0,
+ avail_pts, avail_pts);
+
+ /* Add copy commands size here */
+ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
+ ((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);
+
+ bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ if (src_is_vram)
+ xe_res_next(&src_it, src_L0);
+ else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, false,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram)
+ xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
+ &dst_it, src_L0, dst);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+ if (!copy_only_ccs)
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
+
+ if (copy_only_ccs)
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ src_is_vram, dst_L0_ofs,
+ dst_is_vram, src_L0, dst_L0_ofs,
+ copy_only_ccs);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, xe->info.has_usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ size -= src_L0;
+ continue;
+
+err:
+ xe_bb_free(bb, NULL);
+
+err_sync:
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+ long timeout;
+ long ret;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Evict vram buffer object\n");
+ ret = xe_bo_evict(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Clear evicted vram data first value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Clear evicted vram data last value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Restore vram buffer object\n");
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ return;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void test_clear(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Clear vram buffer object\n");
+ expected = 0x0000000000000000;
+ fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
+ return;
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear main buffer first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear main buffer last value", test);
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+ struct kunit *test)
+{
+ struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ long ret;
+
+ sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+
+ if (IS_ERR(sys_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(sys_bo));
+ return;
+ }
+
+ xe_bo_lock(sys_bo, false);
+ ret = xe_bo_validate(sys_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_sysbo;
+ }
+
+ ret = xe_bo_vmap(sys_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_sysbo;
+ }
+ xe_bo_unlock(sys_bo);
+
+ ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+
+ if (IS_ERR(ccs_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(ccs_bo));
+ return;
+ }
+
+ xe_bo_lock(ccs_bo, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_ccsbo;
+ }
+
+ ret = xe_bo_vmap(ccs_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_ccsbo;
+ }
+ xe_bo_unlock(ccs_bo);
+
+ vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(vram_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(vram_bo));
+ return;
+ }
+
+ xe_bo_lock(vram_bo, false);
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ test_clear(xe, tile, sys_bo, vram_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(vram_bo, false);
+ xe_bo_vunmap(vram_bo);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(ccs_bo, false);
+ xe_bo_vunmap(ccs_bo);
+ xe_bo_unlock(ccs_bo);
+
+ xe_bo_lock(sys_bo, false);
+ xe_bo_vunmap(sys_bo);
+ xe_bo_unlock(sys_bo);
+free_vrambo:
+ xe_bo_put(vram_bo);
+free_ccsbo:
+ xe_bo_put(ccs_bo);
+free_sysbo:
+ xe_bo_put(sys_bo);
+}
+
+static int validate_ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_tile *tile;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
+ kunit_skip(test, "non-xe2 discrete device\n");
+ return 0;
+ }
+
+ xe_pm_runtime_get(xe);
+
+ for_each_tile(tile, xe, id)
+ validate_ccs_test_run_tile(xe, tile, test);
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
-void xe_migrate_sanity_kunit(struct kunit *test)
+static void xe_validate_ccs_kunit(struct kunit *test)
{
- xe_call_for_each_device(migrate_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ validate_ccs_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);
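Note: the eviction-sync idiom used in test_migrate() above can be captured in a small helper; the sketch below only restates the same dma_resv_wait_timeout() call pattern with the identical 5 second budget, it is not a helper the driver provides:

	/* Sketch: wait for all kernel-usage fences on a BO, as test_migrate() does. */
	static int sync_bo_migrations(struct xe_bo *bo)
	{
		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     true, 5 * HZ);

		if (timeout < 0)		/* interrupted or resv error */
			return timeout;
		return timeout ? 0 : -ETIMEDOUT;	/* 0 means the wait timed out */
	}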
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
deleted file mode 100644
index cf0c173b945f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_migrate_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_migrate_tests[] = {
- KUNIT_CASE(xe_migrate_sanity_kunit),
- {}
-};
-
-static struct kunit_suite xe_migrate_test_suite = {
- .name = "xe_migrate",
- .test_cases = xe_migrate_tests,
-};
-
-kunit_test_suite(xe_migrate_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_migrate kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
deleted file mode 100644
index 7c645c66824f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MIGRATE_TEST_H_
-#define _XE_MIGRATE_TEST_H_
-
-struct kunit;
-
-void xe_migrate_sanity_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index df5c36b70ab4..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -6,14 +6,15 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_mocs_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
-#include "xe_pci.h"
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mocs.h"
-#include "xe_device.h"
+#include "xe_pci.h"
+#include "xe_pm.h"
struct live_mocs {
struct xe_mocs_info table;
@@ -22,15 +23,17 @@ struct live_mocs {
static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
{
unsigned int flags;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
memset(arg, 0, sizeof(*arg));
flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
- kunit_info(test, "table size %d", arg->table.size);
+ kunit_info(test, "gt %d", gt->info.id);
+ kunit_info(test, "gt type %d", gt->info.type);
+ kunit_info(test, "table size %d", arg->table.table_size);
kunit_info(test, "table uc_index %d", arg->table.uc_index);
- kunit_info(test, "table n_entries %d", arg->table.n_entries);
+ kunit_info(test, "table num_mocs_regs %d", arg->table.num_mocs_regs);
return flags;
}
@@ -38,69 +41,73 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- unsigned int i;
- u32 l3cc;
+ struct kunit *test = kunit_get_current_test();
+ u32 l3cc, l3cc_expected;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
-
- struct kunit *test = xe_cur_kunit();
-
- xe_device_mem_access_get(gt_to_xe(gt));
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "L3CC entries:%d\n", info->n_entries);
- for (i = 0;
- i < (info->n_entries + 1) / 2 ?
- (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
- get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
- reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
- else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_LNCFCMOCS(i).addr, reg_val, l3cc);
- if (reg_val != l3cc)
- KUNIT_FAIL(test, "l3cc reg 0x%x has incorrect val.\n",
- XELP_LNCFCMOCS(i).addr);
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, false, "Forcewake Failed.\n");
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
+
+ for (i = 0; i < info->num_mocs_regs; i++) {
+ if (!(i & 1)) {
+ if (regs_are_mcr(gt))
+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1));
+ else
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i >> 1));
+
+ mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
+ } else {
+ /* Just reuse value read on previous iteration */
+ reg_val >>= 16;
+ }
+
+ l3cc_expected = get_entry_l3cc(info, i);
+ l3cc = reg_val & 0xffff;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, l3cc_expected, l3cc);
+
+ KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
+ "l3cc idx=%u has incorrect val.\n", i);
+ }
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct xe_device *xe = gt_to_xe(gt);
-
- unsigned int i;
- u32 mocs;
+ struct kunit *test = kunit_get_current_test();
+ u32 mocs, mocs_expected;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
-
- struct kunit *test = xe_cur_kunit();
-
- xe_device_mem_access_get(gt_to_xe(gt));
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
- mocs_dbg(&gt_to_xe(gt)->drm, "Global MOCS entries:%d\n", info->n_entries);
- drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
- "Unused entries index should have been defined\n");
- for (i = 0;
- i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
- i++) {
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+
+ KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
+ "Unused entries index should have been defined\n");
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+
+ for (i = 0; i < info->num_mocs_regs; i++) {
+ if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
- mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
- XELP_GLOBAL_MOCS(i).addr, reg_val, mocs);
- if (reg_val != mocs)
- KUNIT_FAIL(test, "mocs reg 0x%x has incorrect val.\n",
- XELP_GLOBAL_MOCS(i).addr);
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
+
+ mocs_expected = get_entry_control(info, i);
+ mocs = reg_val;
+
+ mocs_dbg(gt, "[%u] expected=0x%x actual=0x%x\n",
+ i, mocs_expected, mocs);
+
+ KUNIT_EXPECT_EQ_MSG(test, mocs_expected, mocs,
+ "mocs reg 0x%x has incorrect val.\n", i);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- xe_device_mem_access_put(gt_to_xe(gt));
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static int mocs_kernel_test_run_device(struct xe_device *xe)
@@ -113,6 +120,8 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
unsigned int flags;
int id;
+ xe_pm_runtime_get(xe);
+
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
if (flags & HAS_GLOBAL_MOCS)
@@ -120,14 +129,21 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
-void xe_live_mocs_kernel_kunit(struct kunit *test)
+static void xe_live_mocs_kernel_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_kernel_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_kernel_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
static int mocs_reset_test_run_device(struct xe_device *xe)
{
@@ -137,7 +153,9 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
struct xe_gt *gt;
unsigned int flags;
int id;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
+
+ xe_pm_runtime_get(xe);
for_each_gt(gt, xe, id) {
flags = live_mocs_init(&mocs, gt);
@@ -147,8 +165,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
@@ -156,11 +173,32 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
}
+
+ xe_pm_runtime_put(xe);
+
return 0;
}
-void xe_live_mocs_reset_kunit(struct kunit *test)
+static void xe_live_mocs_reset_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_reset_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_reset_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
+
+static struct kunit_case xe_mocs_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_mocs_kernel_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_mocs_reset_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_mocs_test_suite = {
+ .name = "xe_mocs",
+ .test_cases = xe_mocs_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite);
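Note: one detail worth spelling out from read_l3cc_table() above is that two 16-bit L3CC entries are packed per 32-bit LNCFCMOCS register, which is why odd indices reuse the previous read shifted down by 16. A compact, hypothetical restatement of that unpacking:

	/* Sketch: entry i lives in register i/2; odd entries occupy the high half. */
	static u16 l3cc_entry_from_reg(u32 reg_val, unsigned int i)
	{
		return (i & 1) ? upper_16_bits(reg_val) : lower_16_bits(reg_val);
	}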
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
deleted file mode 100644
index ee40f31e1e12..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_mocs_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_mocs_tests[] = {
- KUNIT_CASE(xe_live_mocs_kernel_kunit),
- KUNIT_CASE(xe_live_mocs_reset_kunit),
- {}
-};
-
-static struct kunit_suite xe_mocs_test_suite = {
- .name = "xe_mocs",
- .test_cases = xe_mocs_tests,
-};
-
-kunit_test_suite(xe_mocs_test_suite);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("xe_mocs kunit test");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
deleted file mode 100644
index e7699d495411..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MOCS_TEST_H_
-#define _XE_MOCS_TEST_H_
-
-struct kunit;
-
-void xe_live_mocs_kernel_kunit(struct kunit *test);
-void xe_live_mocs_reset_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index f62809ca8b51..1d3e2e50c355 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,58 +12,6 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
-struct kunit_test_data {
- int ndevs;
- xe_device_fn xe_fn;
-};
-
-static int dev_to_xe_device_fn(struct device *dev, void *__data)
-
-{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct kunit_test_data *data = __data;
- int ret = 0;
- int idx;
-
- data->ndevs++;
-
- if (drm_dev_enter(drm, &idx))
- ret = data->xe_fn(to_xe_device(dev_get_drvdata(dev)));
- drm_dev_exit(idx);
-
- return ret;
-}
-
-/**
- * xe_call_for_each_device - Iterate over all devices this driver binds to
- * @xe_fn: Function to call for each device.
- *
- * This function iterated over all devices this driver binds to, and calls
- * @xe_fn: for each one of them. If the called function returns anything else
- * than 0, iteration is stopped and the return value is returned by this
- * function. Across each function call, drm_dev_enter() / drm_dev_exit() is
- * called for the corresponding drm device.
- *
- * Return: Number of devices iterated or
- * the error code of a call to @xe_fn returning an error code.
- */
-int xe_call_for_each_device(xe_device_fn xe_fn)
-{
- int ret;
- struct kunit_test_data data = {
- .xe_fn = xe_fn,
- .ndevs = 0,
- };
-
- ret = driver_for_each_device(&xe_pci_driver.driver, NULL,
- &data, dev_to_xe_device_fn);
-
- if (!data.ndevs)
- kunit_skip(current->kunit_test, "test runs only on hardware\n");
-
- return ret ?: data.ndevs;
-}
-
/**
* xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
* @xe_fn: Function to call for each device.
@@ -73,15 +21,15 @@ int xe_call_for_each_device(xe_device_fn xe_fn)
*/
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
{
- const struct xe_graphics_desc *ip, *last = NULL;
+ const struct xe_graphics_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- ip = graphics_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ desc = graphics_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
@@ -95,15 +43,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
*/
void xe_call_for_each_media_ip(xe_media_fn xe_fn)
{
- const struct xe_media_desc *ip, *last = NULL;
+ const struct xe_media_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- ip = media_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ desc = media_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
@@ -162,8 +110,38 @@ done:
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
- xe_info_init(xe, desc->graphics, desc->media);
+ xe_info_init(xe, desc);
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
+
+/**
+ * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @prev: the previously returned value, or NULL for the first iteration
+ * @desc: the buffer for a parameter name
+ *
+ * Iterates over the available Xe devices on the system. Uses the device name
+ * as the parameter name.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next &struct xe_device ready to be used as a parameter
+ * or NULL if there are no more Xe devices on the system.
+ */
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+{
+ const struct xe_device *xe = prev;
+ struct device *dev = xe ? xe->drm.dev : NULL;
+ struct device *next;
+
+ next = driver_find_next_device(&xe_pci_driver.driver, dev);
+ if (dev)
+ put_device(dev);
+ if (!next)
+ return NULL;
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", dev_name(next));
+ return pdev_to_xe_device(to_pci_dev(next));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_live_device_gen_param);
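Note: the generator contract KUnit expects here is that the framework calls the function with prev == NULL first, feeds each returned pointer back in until NULL is returned, and uses desc for reporting. A rough illustration of that driving loop (not KUnit internals verbatim):

	char desc[KUNIT_PARAM_DESC_SIZE];
	const void *param = NULL;

	while ((param = xe_pci_live_device_gen_param(param, desc)))
		pr_info("next parameterized run: %s\n", desc);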
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index a6705a536391..744a37583d2d 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -16,7 +16,7 @@
static void check_graphics_ip(const struct xe_graphics_desc *graphics)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -30,7 +30,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
static void check_media_ip(const struct xe_media_desc *media)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index f40dcec83992..ede46800aff1 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -19,7 +19,6 @@ typedef int (*xe_device_fn)(struct xe_device *);
typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
typedef void (*xe_media_fn)(const struct xe_media_desc *);
-int xe_call_for_each_device(xe_device_fn xe_fn);
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
@@ -35,4 +34,6 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 06759d754783..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -31,16 +31,23 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-struct rtp_test_case {
+struct rtp_to_sr_test_case {
const char *name;
struct xe_reg expected_reg;
u32 expected_set_bits;
u32 expected_clr_bits;
- unsigned long expected_count;
+ unsigned long expected_count_sr_entries;
unsigned int expected_sr_errors;
+ unsigned long expected_active;
const struct xe_rtp_entry_sr *entries;
};
+struct rtp_test_case {
+ const char *name;
+ unsigned long expected_active;
+ const struct xe_rtp_entry *entries;
+};
+
static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
{
return true;
@@ -51,13 +58,14 @@ static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
return false;
}
-static const struct rtp_test_case cases[] = {
+static const struct rtp_to_sr_test_case rtp_to_sr_cases[] = {
{
.name = "coalesce-same-reg",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Different bits on the same register: create a single entry */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -76,7 +84,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry since rules don't match */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -91,11 +100,66 @@ static const struct rtp_test_case cases[] = {
},
},
{
+ .name = "match-or",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
+ .expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("first"),
+ XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("middle"),
+ XE_RTP_RULES(FUNC(match_no), FUNC(match_no), OR,
+ FUNC(match_yes), OR,
+ FUNC(match_no)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
+ },
+ { XE_RTP_NAME("last"),
+ XE_RTP_RULES(FUNC(match_no), OR, FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(2)))
+ },
+ { XE_RTP_NAME("no-match"),
+ XE_RTP_RULES(FUNC(match_no), OR, FUNC(match_no)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(3)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "match-or-xfail",
+ .expected_reg = REGULAR_REG1,
+ .expected_count_sr_entries = 0,
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("leading-or"),
+ XE_RTP_RULES(OR, FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("trailing-or"),
+ /*
+ * First condition is match_no, otherwise the failure
+ * wouldn't really trigger as RTP stops processing as
+ * soon as it has a matching set of rules
+ */
+ XE_RTP_RULES(FUNC(match_no), OR),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
+ },
+ { XE_RTP_NAME("no-or-or-yes"),
+ XE_RTP_RULES(FUNC(match_no), OR, OR, FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(2)))
+ },
+ {}
+ },
+ },
+ {
.name = "no-match-no-add-multiple-rules",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry due to one of the rules */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -114,7 +178,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 2,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 2,
/* Same bits on different registers are not coalesced */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -133,7 +198,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(1) | REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Check clr vs set actions on different bits */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -154,7 +220,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = TEMP_FIELD,
.expected_clr_bits = TEMP_MASK,
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Check FIELD_SET works */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -172,7 +239,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -192,7 +260,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -212,7 +281,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 2,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -234,39 +304,201 @@ static const struct rtp_test_case cases[] = {
},
};
-static void xe_rtp_process_tests(struct kunit *test)
+static void xe_rtp_process_to_sr_tests(struct kunit *test)
{
- const struct rtp_test_case *param = test->param_value;
+ const struct rtp_to_sr_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- unsigned long idx, count = 0;
+ unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0;
- xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_reg_sr_init(reg_sr, "xe_rtp_to_sr_tests", xe);
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
sr_entry = sre;
- count++;
+ count_sr_entries++;
+ }
+
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+
+ KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries);
+ if (count_sr_entries) {
+ KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
+ KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
+ KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
+ } else {
+ KUNIT_EXPECT_NULL(test, sr_entry);
}
- KUNIT_EXPECT_EQ(test, count, param->expected_count);
- KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
- KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
- KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
}
+/*
+ * Entries below follow the logic used with xe_wa_oob.rules:
+ * 1) Entries with an empty name are OR'ed: all entries since the last entry
+ *    with a name are marked active
+ * 2) There are no actions associated with the rules
+ */
+static const struct rtp_test_case rtp_cases[] = {
+ {
+ .name = "active1",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active2",
+ .expected_active = BIT(0) | BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active-inactive",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-active",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-1st_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_yes), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-2nd_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_yes), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-last_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_yes)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-no_or_active-inactive",
+ .expected_active = 0,
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+};
+
+static void xe_rtp_process_tests(struct kunit *test)
+{
+ const struct rtp_test_case *param = test->param_value;
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ unsigned long count_rtp_entries = 0, active = 0;
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
+ xe_rtp_process(&ctx, param->entries);
+
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+}
+
+static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc);
+
static void rtp_desc(const struct rtp_test_case *t, char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+KUNIT_ARRAY_PARAM(rtp, rtp_cases, rtp_desc);
static int xe_rtp_test_init(struct kunit *test)
{
@@ -299,6 +531,7 @@ static void xe_rtp_test_exit(struct kunit *test)
}
static struct kunit_case xe_rtp_tests[] = {
+ KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params),
KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
{}
};
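Note: the active-tracking mechanism exercised by these cases is what lets non-test consumers ask which named entries matched after processing. A hedged sketch of that consumer side, where apply_workaround() is purely hypothetical:

	unsigned long active = 0;
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);

	xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, n_entries);
	xe_rtp_process(&ctx, entries);

	if (test_bit(1, &active))	/* rules of entry 1 matched */
		apply_workaround();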
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
index 7a1ae213e750..9c23ad9dba8d 100644
--- a/drivers/gpu/drm/xe/tests/xe_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
-#include <linux/sched.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
/*
* Each test that provides a kunit private test structure, place a test id
@@ -31,8 +31,6 @@ struct xe_test_priv {
#define XE_TEST_DECLARE(x) x
#define XE_TEST_ONLY(x) unlikely(x)
-#define XE_TEST_EXPORT
-#define xe_cur_kunit() current->kunit_test
/**
* xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
@@ -48,10 +46,10 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
{
struct xe_test_priv *priv;
- if (!xe_cur_kunit())
+ if (!kunit_get_current_test())
return NULL;
- priv = xe_cur_kunit()->priv;
+ priv = kunit_get_current_test()->priv;
return priv->id == id ? priv : NULL;
}
@@ -59,8 +57,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
#define XE_TEST_DECLARE(x)
#define XE_TEST_ONLY(x) 0
-#define XE_TEST_EXPORT static
-#define xe_cur_kunit() NULL
#define xe_cur_kunit_priv(_id) NULL
#endif
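Note: the net effect of dropping xe_cur_kunit() is that code under test now calls the generic helper from <kunit/test-bug.h> directly; a minimal sketch of that usage, with report() as an illustrative caller:

	#include <kunit/test-bug.h>

	static void report(void)
	{
		struct kunit *test = kunit_get_current_test();

		if (test)	/* NULL when not running under KUnit */
			kunit_info(test, "called from code under test\n");
	}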
diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c
index 875f3e6f965e..93081bcf2ab0 100644
--- a/drivers/gpu/drm/xe/tests/xe_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c
@@ -7,4 +7,4 @@
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe kunit tests");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 44570d888355..c96d1fe34151 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -71,8 +71,10 @@ static const struct platform_test_case cases[] = {
SUBPLATFORM_CASE(DG2, G12, A1),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
};
static void platform_desc(const struct platform_test_case *t, char *desc)