Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile                     |    3
-rw-r--r--  drivers/gpu/drm/ttm/tests/.kunitconfig           |    3
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile               |   11
-rw-r--r--  drivers/gpu/drm/ttm/tests/TODO                   |   27
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c          |  637
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c | 1176
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c      |  206
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c    |  304
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h    |   52
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.c     |  238
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.h     |   30
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c        |  437
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_resource_test.c    |  337
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_tt_test.c          |  402
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c            |    1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_backup.c                 |  182
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                     |  909
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_internal.h            |   60
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c                |  468
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c                  |   75
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c                 |  101
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c           |    2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c                 |    3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                   |  921
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool_internal.h          |   25
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c          |    2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c               |  452
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                     |  126
28 files changed, 6383 insertions, 807 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f906b22959cf..40d07a35293a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,8 @@
ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
- ttm_device.o ttm_sys_manager.o
+ ttm_device.o ttm_sys_manager.o ttm_backup.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644
index 000000000000..1ae1ffabd51e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644
index 000000000000..f3149de77541
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+ ttm_device_test.o \
+ ttm_pool_test.o \
+ ttm_resource_test.o \
+ ttm_tt_test.o \
+ ttm_bo_test.o \
+ ttm_bo_validate_test.o \
+ ttm_mock_manager.o \
+ ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/TODO b/drivers/gpu/drm/ttm/tests/TODO
new file mode 100644
index 000000000000..45b03d184ccf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/TODO
@@ -0,0 +1,27 @@
+TODO
+=====
+
+- Add a test case where the only evictable BO is busy
+- Update eviction tests so they use a parametrized "from" memory type
+- Improve the mock manager's implementation, e.g. allocate a block of
+  dummy memory that can be used when testing page mapping functions
+- Suggestion: Add test cases with external BOs
+- Suggestion: randomize the number and size of tested buffers in
+ ttm_bo_validate()
+- Agree on the naming convention
+- Rewrite the mock manager: drop use_tt and manage mock memory using
+ drm_mm manager
+
+Notes and gotchas
+=================
+
+- These tests are built and run with a UML kernel (see the example run
+  after this list), because
+  1) We are interested in hardware-independent testing
+  2) We don't want actual DRM devices interacting with TTM at the same
+     time as the test devices. Getting these to work in parallel would
+     require some time (...and that's a "todo" in itself!)
+- Triggering ttm_bo_vm_ops callbacks from KUnit (i.e. kernel) might be
+ a challenge, but is worth trying. Look at selftests like
+ i915/gem/selftests/i915_gem_mman.c for inspiration
+- The test suite uses UML, where the ioremap() call returns NULL, meaning
+  that ttm_bo_ioremap() can't be tested unless we find a way to stub it
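The notes above assume the suite is driven by kunit_tool under UML. As a
minimal sketch of how that looks in practice (kunit.py and its
--kunitconfig option are the standard upstream kunit_tool interface, not
something introduced by this series), the tests can be built and run with
the bundled .kunitconfig like so:

    ./tools/testing/kunit/kunit.py run \
            --kunitconfig=drivers/gpu/drm/ttm/tests/.kunitconfig

kunit_tool defaults to the UML architecture, which matches the
hardware-independence goal stated above.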
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
new file mode 100644
index 000000000000..d468f8322072
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/dma-resv.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_8K
+
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
+#else
+#define ww_mutex_base_lock(b) mutex_lock(b)
+#endif
+
+struct ttm_bo_test_case {
+ const char *description;
+ bool interruptible;
+ bool no_wait;
+};
+
+static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
+ {
+ .description = "Cannot be interrupted and sleeps",
+ .interruptible = false,
+ .no_wait = false,
+ },
+ {
+ .description = "Cannot be interrupted, locks straight away",
+ .interruptible = false,
+ .no_wait = true,
+ },
+ {
+ .description = "Can be interrupted, sleeps",
+ .interruptible = true,
+ .no_wait = false,
+ },
+};
+
+static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);
+
+static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
+{
+ const struct ttm_bo_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ dma_resv_unlock(bo->base.resv);
+}
+
+static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ /* Let's lock it beforehand */
+ dma_resv_lock(bo->base.resv, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+}
+
+static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+
+ ww_acquire_fini(&ctx);
+}
+
+static void ttm_bo_reserve_double_resv(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ dma_resv_unlock(bo->base.resv);
+ ww_acquire_fini(&ctx);
+
+ KUNIT_ASSERT_EQ(test, err, -EALREADY);
+}
+
+/*
+ * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
+ * a deadlock by manipulating the sequence number of the context that holds
+ * dma_resv lock of bo2 so the other context is "wounded" and has to back off
+ * (indicated by -EDEADLK). The subtest checks if ttm_bo_reserve() properly
+ * propagates that error.
+ */
+static void ttm_bo_reserve_deadlock(struct kunit *test)
+{
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ww_acquire_ctx ctx1, ctx2;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ ww_acquire_init(&ctx1, &reservation_ww_class);
+ ww_mutex_base_lock(&bo2->base.resv->lock.base);
+
+ /* The deadlock will be caught by WW mutex, don't warn about it */
+ lock_release(&bo2->base.resv->lock.base.dep_map, 1);
+
+ bo2->base.resv->lock.ctx = &ctx2;
+ ctx2 = ctx1;
+ ctx2.stamp--; /* Make the context holding the lock younger */
+
+ err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, -EDEADLK);
+
+ dma_resv_unlock(bo1->base.resv);
+ ww_acquire_fini(&ctx1);
+}
+
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+struct signal_timer {
+ struct timer_list timer;
+ struct ww_acquire_ctx *ctx;
+};
+
+static void signal_for_ttm_bo_reserve(struct timer_list *t)
+{
+ struct signal_timer *s_timer = timer_container_of(s_timer, t, timer);
+ struct task_struct *task = s_timer->ctx->task;
+
+ do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
+}
+
+static int threaded_ttm_bo_reserve(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct signal_timer s_timer;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = true;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ /* Prepare a signal that will interrupt the reservation attempt */
+ timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
+ s_timer.ctx = &ctx;
+
+ mod_timer(&s_timer.timer, jiffies + msecs_to_jiffies(100));
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ timer_delete_sync(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
+
+ ww_acquire_fini(&ctx);
+
+ return err;
+}
+
+static void ttm_bo_reserve_interrupted(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
+
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");
+
+ /* Take a lock so the threaded reserve has to wait */
+ mutex_lock(&bo->base.resv->lock.base);
+
+ wake_up_process(task);
+ msleep(20);
+ err = kthread_stop(task);
+
+ mutex_unlock(&bo->base.resv->lock.base);
+
+ KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
+}
+#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */
+
+static void ttm_bo_unreserve_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->priority = bo_prio;
+
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->resource = res1;
+
+ /* Add a dummy resource to populate LRU */
+ ttm_resource_alloc(bo, place, &res2, NULL);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unreserve(bo);
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
+
+ ttm_resource_free(bo, &res2);
+ ttm_resource_free(bo, &res1);
+}
+
+static void ttm_bo_unreserve_pinned(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res1;
+
+ /* Add a dummy resource to the pinned list */
+ err = ttm_resource_alloc(bo, place, &res2, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
+
+ ttm_bo_unreserve(bo);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1);
+
+ ttm_resource_free(bo, &res1);
+ ttm_resource_free(bo, &res2);
+}
+
+static void ttm_bo_unreserve_bulk(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ttm_resource *res1, *res2;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ struct dma_resv *resv;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ dma_resv_init(resv);
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
+
+ dma_resv_lock(bo1->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
+ dma_resv_unlock(bo1->base.resv);
+
+ err = ttm_resource_alloc(bo1, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo1->resource = res1;
+
+ dma_resv_lock(bo2->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
+ dma_resv_unlock(bo2->base.resv);
+
+ err = ttm_resource_alloc(bo2, place, &res2, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo2->resource = res2;
+
+ ttm_bo_reserve(bo1, false, false, NULL);
+ ttm_bo_unreserve(bo1);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+ KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);
+
+ ttm_resource_free(bo1, &res1);
+ ttm_resource_free(bo2, &res2);
+
+ dma_resv_fini(resv);
+}
+
+static void ttm_bo_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_fini(bo);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "kunit-ttm-bo-put";
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static void ttm_bo_fini_shared_resv(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct dma_resv *external_resv;
+ struct dma_fence *fence;
+ /* A dummy DMA fence lock */
+ spinlock_t fence_lock;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, external_resv);
+
+ dma_resv_init(external_resv);
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ spin_lock_init(&fence_lock);
+ dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);
+
+ dma_resv_lock(external_resv, NULL);
+ dma_resv_reserve_fences(external_resv, 1);
+ dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_unlock(external_resv);
+
+ dma_fence_signal(fence);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+ bo->base.resv = external_resv;
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_pin_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ unsigned int no_pins = 3;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ for (int i = 0; i < no_pins; i++) {
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
+}
+
+static void ttm_bo_pin_unpin_resource(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 0);
+
+ ttm_resource_free(bo, &res);
+}
+
+static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+
+ /* Multiple pins */
+ ttm_bo_pin(bo);
+ ttm_bo_pin(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_resource_free(bo, &res);
+}
+
+static struct kunit_case ttm_bo_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
+ ttm_bo_reserve_gen_params),
+ KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
+ KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
+ KUNIT_CASE(ttm_bo_reserve_double_resv),
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+ KUNIT_CASE(ttm_bo_reserve_interrupted),
+#endif
+ KUNIT_CASE(ttm_bo_reserve_deadlock),
+ KUNIT_CASE(ttm_bo_unreserve_basic),
+ KUNIT_CASE(ttm_bo_unreserve_pinned),
+ KUNIT_CASE(ttm_bo_unreserve_bulk),
+ KUNIT_CASE(ttm_bo_fini_basic),
+ KUNIT_CASE(ttm_bo_fini_shared_resv),
+ KUNIT_CASE(ttm_bo_pin_basic),
+ KUNIT_CASE(ttm_bo_pin_unpin_resource),
+ KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+ {}
+};
+
+static struct kunit_suite ttm_bo_test_suite = {
+ .name = "ttm_bo",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
new file mode 100644
index 000000000000..2eda87882e65
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+#include "ttm_mock_manager.h"
+
+#define BO_SIZE SZ_4K
+#define MANAGER_SIZE SZ_1M
+
+static struct spinlock fence_lock;
+
+struct ttm_bo_validate_test_case {
+ const char *description;
+ enum ttm_bo_type bo_type;
+ u32 mem_type;
+ bool with_ttm;
+ bool no_gpu_wait;
+};
+
+static struct ttm_placement *ttm_placement_kunit_init(struct kunit *test,
+ struct ttm_place *places,
+ unsigned int num_places)
+{
+ struct ttm_placement *placement;
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ placement->num_placement = num_places;
+ placement->placement = places;
+
+ return placement;
+}
+
+static const char *fence_name(struct dma_fence *f)
+{
+ return "ttm-bo-validate-fence";
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = fence_name,
+ .get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_mock_fence(struct kunit *test)
+{
+ struct dma_fence *fence;
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ dma_fence_init(fence, &fence_ops, &fence_lock, 0, 0);
+
+ return fence;
+}
+
+static void dma_resv_kunit_active_fence_init(struct kunit *test,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ struct dma_fence *fence;
+
+ fence = alloc_mock_fence(test);
+ dma_fence_enable_sw_signaling(fence);
+
+ dma_resv_lock(resv, NULL);
+ dma_resv_reserve_fences(resv, 1);
+ dma_resv_add_fence(resv, fence, usage);
+ dma_resv_unlock(resv);
+}
+
+static void ttm_bo_validate_case_desc(const struct ttm_bo_validate_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_type_cases[] = {
+ {
+ .description = "Buffer object for userspace",
+ .bo_type = ttm_bo_type_device,
+ },
+ {
+ .description = "Kernel buffer object",
+ .bo_type = ttm_bo_type_kernel,
+ },
+ {
+ .description = "Shared buffer object",
+ .bo_type = ttm_bo_type_sg,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_types, ttm_bo_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_init_reserved_sys_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_test_devices *priv = test->priv;
+ enum ttm_bo_type bo_type = params->bo_type;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, bo->destroy, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, bo->pin_count, 0);
+ KUNIT_EXPECT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_FALSE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_init_reserved_mock_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = params->bo_type;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_init_reserved_resv(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct dma_resv resv;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+ dma_resv_init(&resv);
+ dma_resv_lock(&resv, NULL);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, &resv,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_basic(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ u32 fst_mem = TTM_PL_SYSTEM, snd_mem = TTM_PL_VRAM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *fst_placement, *snd_placement;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *snd_place;
+ u32 size = ALIGN(SZ_8K, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ fst_placement = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ fst_placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION);
+ snd_placement = ttm_placement_kunit_init(test, snd_place, 1);
+
+ err = ttm_bo_validate(bo, snd_placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_EXPECT_EQ(test, bo->resource->placement,
+ DRM_BUDDY_TOPDOWN_ALLOCATION);
+
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_invalid_placement(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 unknown_mem_type = TTM_PL_PRIV + 1;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, unknown_mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_failed_alloc(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bad_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_pinned(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_pin(bo);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_bo_fini(bo);
+}
+
+static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
+ {
+ .description = "System manager",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "VRAM manager",
+ .mem_type = TTM_PL_VRAM,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_mem, ttm_mem_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_same_placement(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, params->mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_init(priv->ttm_dev, params->mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
+
+ ttm_bo_fini(bo);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
+}
+
+static void ttm_bo_validate_busy_placement(struct kunit *test)
+{
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_bad_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ man = ttm_manager_type(priv->ttm_dev, snd_mem);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_bo_fini(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_multihop(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ u32 fst_mem = TTM_PL_VRAM, tmp_mem = TTM_PL_TT, final_mem = TTM_PL_SYSTEM;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *final_place;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, tmp_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ placement_init = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement_init, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ final_place = ttm_place_kunit_init(test, final_mem, 0);
+ placement_val = ttm_placement_kunit_init(test, final_place, 1);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
+
+ ttm_bo_fini(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_no_placement_cases[] = {
+ {
+ .description = "Buffer object in system domain, no page vector",
+ },
+ {
+ .description = "Buffer object in system domain with an existing page vector",
+ .with_ttm = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_no_placement, ttm_bo_no_placement_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_tt *old_tt;
+ u32 flags;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ if (params->with_ttm) {
+ old_tt = priv->ttm_dev->funcs->ttm_tt_create(bo, 0);
+ ttm_pool_alloc(&priv->ttm_dev->pool, old_tt, &ctx);
+ bo->ttm = old_tt;
+ }
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (params->with_ttm) {
+ flags = bo->ttm->page_flags;
+
+ KUNIT_ASSERT_PTR_EQ(test, bo->ttm, old_tt);
+ KUNIT_ASSERT_FALSE(test, flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
+ }
+
+ ttm_bo_fini(bo);
+}
+
+static int threaded_dma_resv_signal(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct dma_resv *resv = bo->base.resv;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_signal(fence);
+ }
+ dma_resv_iter_end(&cursor);
+
+ return 0;
+}
+
+static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = params->bo_type;
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ /* Create an active fence to simulate a non-idle resv object */
+ spin_lock_init(&fence_lock);
+ dma_resv_kunit_active_fence_init(test, bo->base.resv, usage);
+
+ task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create dma resv signal task\n");
+
+ wake_up_process(task);
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_ASSERT_NULL(test, bo->resource);
+ KUNIT_ASSERT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (bo->type != ttm_bo_type_sg)
+ KUNIT_ASSERT_PTR_EQ(test, bo->base.resv, &bo->base._resv);
+
+ /* Make sure we have an idle object at this point */
+ dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ man->eviction_fences[0] = dma_fence_get_stub();
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ ttm_bo_fini(bo);
+ dma_fence_put(man->eviction_fences[0]);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
+ {
+ .description = "Waits for GPU",
+ .no_gpu_wait = false,
+ },
+ {
+ .description = "Tries to lock straight away",
+ .no_gpu_wait = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_wait, ttm_bo_validate_wait_cases,
+ ttm_bo_validate_case_desc);
+
+static int threaded_fence_signal(void *arg)
+{
+ struct dma_fence *fence = arg;
+
+ msleep(20);
+
+ return dma_fence_signal(fence);
+}
+
+static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { },
+ ctx_val = { .no_wait_gpu = params->no_gpu_wait };
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ spin_lock_init(&fence_lock);
+ man = ttm_manager_type(priv->ttm_dev, fst_mem);
+ man->eviction_fences[0] = alloc_mock_fence(test);
+
+ task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create move fence signal task\n");
+
+ wake_up_process(task);
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT);
+ man->eviction_fences[0] = NULL;
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);
+
+ if (params->no_gpu_wait)
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ else
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
+
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_happy_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 small = SZ_8K, medium = SZ_512K,
+ big = MANAGER_SIZE - (small + medium);
+ u32 bo_sizes[] = { small, medium, big };
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bos, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 bo_no = 3;
+ int i, err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bos = kunit_kmalloc_array(test, bo_no, sizeof(*bos), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bos);
+
+ memset(bos, 0, sizeof(*bos) * bo_no);
+ for (i = 0; i < bo_no; i++) {
+ drm_gem_private_object_init(priv->drm, &bos[i].base, bo_sizes[i]);
+ err = ttm_bo_init_reserved(priv->ttm_dev, &bos[i], bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bos[i].base.resv);
+ }
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bos[0].resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, small * 2 + BO_SIZE);
+ KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
+
+ for (i = 0; i < bo_no; i++)
+ ttm_bo_fini(&bos[i]);
+ ttm_bo_fini(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_pin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo_small);
+
+ ttm_bo_reserve(bo_big, false, false, NULL);
+ ttm_bo_unpin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+ ttm_bo_fini(bo_big);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_buffer_object *bo, *bo_evictable, *bo_pinned;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 size = SZ_512K;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_pinned = kunit_kzalloc(test, sizeof(*bo_pinned), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_pinned);
+
+ drm_gem_private_object_init(priv->drm, &bo_pinned->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_pinned, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ ttm_bo_pin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+
+ bo_evictable = kunit_kzalloc(test, sizeof(*bo_evictable), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evictable);
+
+ drm_gem_private_object_init(priv->drm, &bo_evictable->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evictable, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evictable->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_pinned->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
+
+ ttm_bo_fini(bo);
+ ttm_bo_fini(bo_evictable);
+
+ ttm_bo_reserve(bo_pinned, false, false, NULL);
+ ttm_bo_unpin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+ ttm_bo_fini(bo_pinned);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_deleted_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ u32 small = SZ_8K, big = MANAGER_SIZE - BO_SIZE;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_resource_manager *man;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, big);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), big);
+
+ dma_resv_unlock(bo_big->base.resv);
+ bo_big->deleted = true;
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, small, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), small);
+ KUNIT_EXPECT_NULL(test, bo_big->ttm);
+ KUNIT_EXPECT_NULL(test, bo_big->resource);
+
+ ttm_bo_fini(bo_small);
+ ttm_bo_fini(bo_big);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_type_evict = TTM_PL_MOCK1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo_init, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ /*
+ * Drop the default device and set up a new one that points to a busy,
+ * and therefore unsuitable, eviction domain.
+ */
+ ttm_device_fini(priv->ttm_dev);
+
+ err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_busy_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_init = kunit_kzalloc(test, sizeof(*bo_init), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_init);
+
+ drm_gem_private_object_init(priv->drm, &bo_init->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_init, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_init->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+ KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
+ KUNIT_EXPECT_NULL(test, bo_val->resource);
+
+ ttm_bo_fini(bo_init);
+ ttm_bo_fini(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
+}
+
+static void ttm_bo_validate_evict_gutting(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo, *bo_evict;
+ u32 mem_type = TTM_PL_MOCK1;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_evict = kunit_kzalloc(test, sizeof(*bo_evict), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evict);
+
+ drm_gem_private_object_init(priv->drm, &bo_evict->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evict, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evict->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_ASSERT_NULL(test, bo_evict->resource);
+ KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+
+ ttm_bo_fini(bo_evict);
+ ttm_bo_fini(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_recursive_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_TT, mem_type_evict = TTM_PL_MOCK2;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_tt, *placement_mock;
+ struct ttm_buffer_object *bo_tt, *bo_mock, *bo_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *place_tt, *place_mock;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place_tt = ttm_place_kunit_init(test, mem_type, 0);
+ place_mock = ttm_place_kunit_init(test, mem_type_evict, 0);
+
+ placement_tt = ttm_placement_kunit_init(test, place_tt, 1);
+ placement_mock = ttm_placement_kunit_init(test, place_mock, 1);
+
+ bo_tt = kunit_kzalloc(test, sizeof(*bo_tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_tt);
+
+ bo_mock = kunit_kzalloc(test, sizeof(*bo_mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_mock);
+
+ drm_gem_private_object_init(priv->drm, &bo_tt->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_tt, bo_type, placement_tt,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_tt->base.resv);
+
+ drm_gem_private_object_init(priv->drm, &bo_mock->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_mock, bo_type, placement_mock,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_mock->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
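+	/*
+	 * Validating into TT evicts bo_tt into the mock domain, which is also
+	 * full and must first evict bo_mock, i.e. a recursive eviction
+	 */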
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement_tt, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
+
+ ttm_bo_fini(bo_val);
+ ttm_bo_fini(bo_tt);
+ ttm_bo_fini(bo_mock);
+}
+
+static struct kunit_case ttm_bo_validate_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_sys_man, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_mock_man, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_init_reserved_resv),
+ KUNIT_CASE_PARAM(ttm_bo_validate_basic, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_invalid_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_same_placement,
+ ttm_bo_validate_mem_gen_params),
+ KUNIT_CASE(ttm_bo_validate_failed_alloc),
+ KUNIT_CASE(ttm_bo_validate_pinned),
+ KUNIT_CASE(ttm_bo_validate_busy_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_multihop, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_signaled,
+ ttm_bo_no_placement_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_not_signaled,
+ ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
+ KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
+ ttm_bo_validate_wait_gen_params),
+ KUNIT_CASE(ttm_bo_validate_happy_evict),
+ KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
+ KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
+ KUNIT_CASE(ttm_bo_validate_deleted_evict),
+ KUNIT_CASE(ttm_bo_validate_busy_domain_evict),
+ KUNIT_CASE(ttm_bo_validate_evict_gutting),
+	KUNIT_CASE(ttm_bo_validate_recursive_evict),
+ {}
+};
+
+static struct kunit_suite ttm_bo_validate_test_suite = {
+ .name = "ttm_bo_validate",
+ .init = ttm_test_devices_all_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_validate_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_validate_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644
index 000000000000..2d55ad34fe48
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
+
+struct ttm_device_test_case {
+ const char *description;
+ unsigned int alloc_flags;
+ bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *ttm_sys_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+ ttm_sys_man = &ttm_dev->sysman;
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+
+ ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_devs;
+ unsigned int i, num_dev = 3;
+ int err;
+
+ ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+ for (i = 0; i < num_dev; i++) {
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+ }
+
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+ for (i = 0; i < num_dev; i++)
+ ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_device_fini(ttm_dev);
+
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+ KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct ttm_device *ttm_dev;
+ struct drm_vma_offset_manager *vma_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ /* Let's pretend there's no VMA manager allocated */
+ vma_man = drm->vma_offset_manager;
+ drm->vma_offset_manager = NULL;
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ /* Bring the manager back for a graceful cleanup */
+ drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+ {
+ .description = "No DMA allocations, no DMA32 required",
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
+ TTM_ALLOCATION_POOL_USE_DMA32,
+ .pools_init_expected = true,
+ },
+ {
+ .description = "No DMA allocations, DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, no DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ .pools_init_expected = true,
+ },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ const struct ttm_device_test_case *params = test->param_value;
+ struct ttm_device *ttm_dev;
+ struct ttm_pool *pool;
+ struct ttm_pool_type pt;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, params->alloc_flags);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = &ttm_dev->pool;
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+ KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+ KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags);
+
+ if (params->pools_init_expected) {
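+		/* Each caching/order pool type should point back at its parent pool */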
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
+ pt = pool->caching[i].orders[j];
+ KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+ KUNIT_EXPECT_EQ(test, pt.caching, i);
+ KUNIT_EXPECT_EQ(test, pt.order, j);
+
+ if (ttm_pool_uses_dma_alloc(pool))
+ KUNIT_ASSERT_FALSE(test,
+ list_empty(&pt.pages));
+ }
+ }
+ }
+
+ ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+ KUNIT_CASE(ttm_device_init_basic),
+ KUNIT_CASE(ttm_device_init_multiple),
+ KUNIT_CASE(ttm_device_fini_basic),
+ KUNIT_CASE(ttm_device_init_no_vma_man),
+ KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+ {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+ .name = "ttm_device",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_device APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644
index 000000000000..7b533e4e1e04
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/export.h>
+
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+static const struct ttm_place sys_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock1_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK1,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock2_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK2,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_place,
+};
+
+static struct ttm_placement bad_placement = {
+ .num_placement = 1,
+ .placement = &mock1_place,
+};
+
+static struct ttm_placement mock_placement = {
+ .num_placement = 1,
+ .placement = &mock2_place,
+};
+
+static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
+
+ return tt;
+}
+
+static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ kfree(ttm);
+}
+
+static int mock_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
+{
+ struct ttm_resource *old_mem = bo->resource;
+
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
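+	/* Moves from VRAM to system memory have to hop through TT first */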
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ return -EMULTIHOP;
+ }
+
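+	/* Moves between system memory and TT need no copy, just adopt the new resource */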
+ if ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) ||
+ (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
+}
+
+static void mock_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ switch (bo->resource->mem_type) {
+ case TTM_PL_VRAM:
+ case TTM_PL_SYSTEM:
+ *placement = sys_placement;
+ break;
+ case TTM_PL_TT:
+ *placement = mock_placement;
+ break;
+ case TTM_PL_MOCK1:
+ /* Purge objects coming from this domain */
+ break;
+ }
+}
+
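+/* Unconditionally route evictions to the fixed "bad" placement */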
+static void bad_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = bad_placement;
+}
+
+static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ unsigned int alloc_flags,
+ struct ttm_device_funcs *funcs)
+{
+ struct drm_device *drm = priv->drm;
+ int err;
+
+ err = ttm_device_init(ttm, funcs, drm->dev,
+ drm->anon_inode->i_mapping,
+ drm->vma_offset_manager,
+ alloc_flags);
+
+ return err;
+}
+
+struct ttm_device_funcs ttm_dev_funcs = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = mock_evict_flags,
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ unsigned int alloc_flags)
+{
+ return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
+ &ttm_dev_funcs);
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bad_evict_flags,
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
+
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm)
+{
+ return ttm_device_kunit_init_with_funcs(priv, ttm, 0,
+ &ttm_dev_funcs_bad_evict);
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size,
+ struct dma_resv *obj)
+{
+ struct drm_gem_object gem_obj = { };
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ bo->base = gem_obj;
+
+ if (obj)
+ bo->base.resv = obj;
+
+ err = drm_gem_object_init(devs->drm, &bo->base, size);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->bdev = devs->ttm_dev;
+ bo->destroy = dummy_ttm_bo_destroy;
+
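+	/* Mimic the initial reference normally taken by ttm_bo_init_reserved() */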
+ kref_init(&bo->kref);
+
+ return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
+{
+ struct ttm_place *place;
+
+ place = kunit_kzalloc(test, sizeof(*place), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, place);
+
+ place->mem_type = mem_type;
+ place->flags = flags;
+
+ return place;
+}
+EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
+
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+ drm_gem_object_release(&bo->base);
+}
+EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+
+ devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, devs);
+
+ devs->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+ /* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
+ devs->dev->coherent_dma_mask = -1;
+
+ devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+ sizeof(*devs->drm), 0,
+ DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ devs = ttm_test_devices_basic(test);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(devs, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ devs->ttm_dev = ttm_dev;
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+ if (devs->ttm_dev)
+ ttm_device_fini(devs->ttm_dev);
+
+ drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+int ttm_test_devices_all_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv = ttm_test_devices_all(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+ ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_DESCRIPTION("TTM KUnit test helper functions");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644
index 000000000000..f8402b979d05
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+#define TTM_PL_MOCK1 (TTM_PL_PRIV + 1)
+#define TTM_PL_MOCK2 (TTM_PL_PRIV + 2)
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+extern struct ttm_device_funcs ttm_dev_funcs_bad_evict;
+
+struct ttm_test_devices {
+ struct drm_device *drm;
+ struct device *dev;
+ struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ unsigned int alloc_flags);
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size,
+ struct dma_resv *obj);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type,
+ u32 flags);
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+int ttm_test_devices_all_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif // TTM_KUNIT_HELPERS_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
new file mode 100644
index 000000000000..dd395229e388
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_mock_manager.h"
+
+static inline struct ttm_mock_manager *
+to_mock_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct ttm_mock_manager, man);
+}
+
+static inline struct ttm_mock_resource *
+to_mock_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_mock_resource, base);
+}
+
+static int ttm_mock_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res;
+ struct drm_buddy *mm = &manager->mm;
+ u64 lpfn, fpfn, alloc_size;
+ int err;
+
+ mock_res = kzalloc(sizeof(*mock_res), GFP_KERNEL);
+
+ if (!mock_res)
+ return -ENOMEM;
+
+ fpfn = 0;
+ lpfn = man->size;
+
+ ttm_resource_init(bo, place, &mock_res->base);
+ INIT_LIST_HEAD(&mock_res->blocks);
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mock_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ mock_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
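+	/* Back the resource with blocks carved out of the manager's buddy allocator */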
+	alloc_size = (u64)mock_res->base.size;
+ mutex_lock(&manager->lock);
+ err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size,
+ manager->default_page_size,
+ &mock_res->blocks,
+ mock_res->flags);
+
+ if (err)
+ goto error_free_blocks;
+ mutex_unlock(&manager->lock);
+
+ *res = &mock_res->base;
+ return 0;
+
+error_free_blocks:
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ ttm_resource_fini(man, &mock_res->base);
+ mutex_unlock(&manager->lock);
+
+ return err;
+}
+
+static void ttm_mock_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res = to_mock_mgr_resource(res);
+ struct drm_buddy *mm = &manager->mm;
+
+ mutex_lock(&manager->lock);
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ mutex_unlock(&manager->lock);
+
+ ttm_resource_fini(man, res);
+ kfree(mock_res);
+}
+
+static const struct ttm_resource_manager_func ttm_mock_manager_funcs = {
+ .alloc = ttm_mock_manager_alloc,
+ .free = ttm_mock_manager_free,
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_mock_manager *manager;
+ struct ttm_resource_manager *base;
+ int err;
+
+ manager = kzalloc(sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return -ENOMEM;
+
+ mutex_init(&manager->lock);
+
+ err = drm_buddy_init(&manager->mm, size, PAGE_SIZE);
+
+ if (err) {
+ kfree(manager);
+ return err;
+ }
+
+ manager->default_page_size = PAGE_SIZE;
+ base = &manager->man;
+ base->func = &ttm_mock_manager_funcs;
+ base->use_tt = true;
+
+ ttm_resource_manager_init(base, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, base);
+ ttm_resource_manager_set_used(base, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_init);
+
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_mock_manager *mock_man;
+ int err;
+
+ man = ttm_manager_type(bdev, mem_type);
+ mock_man = to_mock_mgr(man);
+
+ err = ttm_resource_manager_evict_all(bdev, man);
+ if (err)
+ return;
+
+ ttm_resource_manager_set_used(man, false);
+
+ mutex_lock(&mock_man->lock);
+ drm_buddy_fini(&mock_man->mm);
+ mutex_unlock(&mock_man->lock);
+
+ ttm_set_driver_manager(bdev, mem_type, NULL);
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_fini);
+
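+/*
+ * Managers that can never satisfy an allocation, used to exercise the
+ * error paths of validation and eviction
+ */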
+static int ttm_bad_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -ENOSPC;
+}
+
+static int ttm_busy_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -EBUSY;
+}
+
+static void ttm_bad_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+}
+
+static bool ttm_bad_manager_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return true;
+}
+
+static const struct ttm_resource_manager_func ttm_bad_manager_funcs = {
+ .alloc = ttm_bad_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+static const struct ttm_resource_manager_func ttm_bad_busy_manager_funcs = {
+ .alloc = ttm_busy_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_resource_manager *man;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return -ENOMEM;
+
+ man->func = &ttm_bad_manager_funcs;
+
+ ttm_resource_manager_init(man, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_init);
+
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_resource_manager *man;
+
+ ttm_bad_manager_init(bdev, mem_type, size);
+ man = ttm_manager_type(bdev, mem_type);
+
+ man->func = &ttm_bad_busy_manager_funcs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_busy_manager_init);
+
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, mem_type);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, mem_type, NULL);
+
+ kfree(man);
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_fini);
+
+MODULE_DESCRIPTION("KUnit tests for ttm with mock resource managers");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
new file mode 100644
index 000000000000..e4c95f86a467
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_MOCK_MANAGER_H
+#define TTM_MOCK_MANAGER_H
+
+#include <drm/drm_buddy.h>
+
+struct ttm_mock_manager {
+ struct ttm_resource_manager man;
+ struct drm_buddy mm;
+ u64 default_page_size;
+ /* protects allocations of mock buffer objects */
+ struct mutex lock;
+};
+
+struct ttm_mock_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type);
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type);
+
+#endif // TTM_MOCK_MANAGER_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644
index 000000000000..11c92bd75779
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
+
+struct ttm_pool_test_case {
+ const char *description;
+ unsigned int order;
+ unsigned int alloc_flags;
+};
+
+struct ttm_pool_test_priv {
+ struct ttm_test_devices *devs;
+
+ /* Used to create mock ttm_tts */
+ struct ttm_buffer_object *mock_bo;
+};
+
+static struct ttm_operation_ctx simple_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+ u32 page_flags,
+ enum ttm_caching caching,
+ size_t size)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+ priv->mock_bo = bo;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ return tt;
+}
+
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+ size_t size,
+ enum ttm_caching caching)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_pool *pool;
+ struct ttm_tt *tt;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+
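+	/*
+	 * Allocate and free right away so the pool's per-order page lists
+	 * are left populated
+	 */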
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ return pool;
+}
+
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+ {
+ .description = "One page",
+ .order = 0,
+ },
+ {
+ .description = "More than one page",
+ .order = 2,
+ },
+ {
+ .description = "Above the allocation limit",
+ .order = MAX_PAGE_ORDER + 1,
+ },
+ {
+ .description = "One page, with coherent DMA mappings enabled",
+ .order = 0,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ },
+ {
+ .description = "Above the allocation limit, with coherent DMA mappings enabled",
+ .order = MAX_PAGE_ORDER + 1,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+ ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct page *fst_page, *last_page;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
+
+ KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+ KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+ KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ fst_page = tt->pages[0];
+ last_page = tt->pages[tt->num_pages - 1];
+
+ if (params->order <= MAX_PAGE_ORDER) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+ } else {
+ KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+ }
+ } else {
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+ } else {
+ /*
+ * We expect to alloc one big block, followed by
+ * order 0 blocks
+ */
+ KUNIT_ASSERT_EQ(test, fst_page->private,
+ min_t(unsigned int, MAX_PAGE_ORDER,
+ params->order));
+ KUNIT_ASSERT_EQ(test, last_page->private, 0);
+ }
+ }
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_buffer_object *bo;
+ dma_addr_t dma1, dma2;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, devs, size, NULL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ dma1 = tt->dma_address[0];
+ dma2 = tt->dma_address[tt->num_pages - 1];
+
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching tt_caching = ttm_uncached;
+ enum ttm_caching pool_caching = ttm_cached;
+ size_t size = PAGE_SIZE;
+ unsigned int order = 0;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+ pt_pool = &pool->caching[pool_caching].orders[order];
+ pt_tt = &pool->caching[tt_caching].orders[order];
+
+ tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t fst_size = (1 << order) * PAGE_SIZE;
+ size_t snd_size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+ pt_pool = &pool->caching[caching].orders[order];
+ pt_tt = &pool->caching[caching].orders[0];
+
+ tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+ pt = &pool->caching[caching].orders[order];
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+ ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+ KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+ KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+ KUNIT_CASE(ttm_pool_free_dma_alloc),
+ KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+ KUNIT_CASE(ttm_pool_fini_basic),
+ {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+ .name = "ttm_pool",
+ .init = ttm_pool_test_init,
+ .exit = ttm_pool_test_fini,
+ .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_pool APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
new file mode 100644
index 000000000000..c0e4e35e0442
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define RES_SIZE SZ_4K
+#define TTM_PRIV_DUMMY_REG (TTM_NUM_MEM_TYPES - 1)
+
+struct ttm_resource_test_case {
+ const char *description;
+ u32 mem_type;
+ u32 flags;
+};
+
+struct ttm_resource_test_priv {
+ struct ttm_test_devices *devs;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+};
+
+static const struct ttm_resource_manager_func ttm_resource_manager_mock_funcs = { };
+
+static int ttm_resource_test_init(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_resource_test_fini(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static void ttm_init_test_mocks(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ u32 mem_type, u32 flags)
+{
+ size_t size = RES_SIZE;
+
+ /* Make sure we have what we need for a good BO mock */
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);
+
+ priv->bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
+ priv->place = ttm_place_kunit_init(test, mem_type, flags);
+}
+
+static void ttm_init_test_manager(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ u32 mem_type)
+{
+ struct ttm_device *ttm_dev = priv->devs->ttm_dev;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ man->use_tt = false;
+ man->func = &ttm_resource_manager_mock_funcs;
+
+ ttm_resource_manager_init(man, ttm_dev, size);
+ ttm_set_driver_manager(ttm_dev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
+static const struct ttm_resource_test_case ttm_resource_cases[] = {
+ {
+ .description = "Init resource in TTM_PL_SYSTEM",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "Init resource in TTM_PL_VRAM",
+ .mem_type = TTM_PL_VRAM,
+ },
+ {
+ .description = "Init resource in a private placement",
+ .mem_type = TTM_PRIV_DUMMY_REG,
+ },
+ {
+ .description = "Init resource in TTM_PL_SYSTEM, set placement flags",
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_TOPDOWN,
+ },
+};
+
+static void ttm_resource_case_desc(const struct ttm_resource_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_resource, ttm_resource_cases, ttm_resource_case_desc);
+
+static void ttm_resource_init_basic(struct kunit *test)
+{
+ const struct ttm_resource_test_case *params = test->param_value;
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ u64 expected_usage;
+
+ ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
+ bo = priv->bo;
+ place = priv->place;
+
+ if (params->mem_type > TTM_PL_SYSTEM)
+ ttm_init_test_manager(test, priv, params->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+ expected_usage = man->usage + RES_SIZE;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+
+ ttm_resource_init(bo, place, res);
+
+ KUNIT_ASSERT_EQ(test, res->start, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, place->mem_type);
+ KUNIT_ASSERT_EQ(test, res->placement, place->flags);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ KUNIT_ASSERT_NULL(test, res->bus.addr);
+ KUNIT_ASSERT_EQ(test, res->bus.offset, 0);
+ KUNIT_ASSERT_FALSE(test, res->bus.is_iomem);
+ KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);
+ KUNIT_ASSERT_EQ(test, man->usage, expected_usage);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_init_pinned(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
+
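+	/* A pinned BO's resource should land on the device's unevictable list */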
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ ttm_resource_init(bo, place, res);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable));
+
+ ttm_bo_unpin(bo);
+ ttm_resource_fini(man, res);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
+}
+
+static void ttm_resource_fini_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_init(bo, place, res);
+ ttm_resource_fini(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static void ttm_resource_manager_init_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+ int i;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_resource_manager_init(man, priv->devs->ttm_dev, size);
+
+ KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
+ KUNIT_ASSERT_EQ(test, man->size, size);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ KUNIT_ASSERT_NULL(test, man->eviction_fences[i]);
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
+}
+
+static void ttm_resource_manager_usage_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ u64 actual_usage;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ ttm_resource_init(bo, place, res);
+ actual_usage = ttm_resource_manager_usage(man);
+
+ KUNIT_ASSERT_EQ(test, actual_usage, RES_SIZE);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_manager_set_used_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_TRUE(test, man->use_type);
+
+ ttm_resource_manager_set_used(man, false);
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+}
+
+static void ttm_sys_man_alloc_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int ret;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ ret = man->func->alloc(man, bo, place, &res);
+
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, mem_type);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_sys_man_free_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ u32 mem_type = TTM_PL_SYSTEM;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_alloc(bo, place, &res, NULL);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ man->func->free(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static struct kunit_case ttm_resource_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_resource_init_basic, ttm_resource_gen_params),
+ KUNIT_CASE(ttm_resource_init_pinned),
+ KUNIT_CASE(ttm_resource_fini_basic),
+ KUNIT_CASE(ttm_resource_manager_init_basic),
+ KUNIT_CASE(ttm_resource_manager_usage_basic),
+ KUNIT_CASE(ttm_resource_manager_set_used_basic),
+ KUNIT_CASE(ttm_sys_man_alloc_basic),
+ KUNIT_CASE(ttm_sys_man_free_basic),
+ {}
+};
+
+static struct kunit_suite ttm_resource_test_suite = {
+ .name = "ttm_resource",
+ .init = ttm_resource_test_init,
+ .exit = ttm_resource_test_fini,
+ .test_cases = ttm_resource_test_cases,
+};
+
+kunit_test_suites(&ttm_resource_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_resource and ttm_sys_man APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
new file mode 100644
index 000000000000..61ec6f580b62
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/shmem_fs.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_4K
+
+struct ttm_tt_test_case {
+ const char *description;
+ u32 size;
+ u32 extra_pages_num;
+};
+
+static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
+ {
+ .description = "Page-aligned size",
+ .size = SZ_4K,
+ },
+ {
+ .description = "Extra pages requested",
+ .size = SZ_4K,
+ .extra_pages_num = 1,
+ },
+};
+
+static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
+ ttm_tt_init_case_desc);
+
+static void ttm_tt_init_basic(struct kunit *test)
+{
+ const struct ttm_tt_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ u32 page_flags = TTM_TT_FLAG_ZERO_ALLOC;
+ enum ttm_caching caching = ttm_cached;
+ u32 extra_pages = params->extra_pages_num;
+ int num_pages = params->size >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, params->size, NULL);
+
+ err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
+
+ KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
+ KUNIT_ASSERT_EQ(test, tt->caching, caching);
+
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_init_misaligned(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ u32 size = SZ_8K;
+ int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+
+ /* Make the object size misaligned */
+ bo->base.size += 1;
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
+}
+
+static void ttm_tt_fini_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->pages);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->pages);
+}
+
+static void ttm_tt_fini_sg(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+}
+
+static void ttm_tt_fini_shmem(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct file *shmem;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
+ tt->swap_storage = shmem;
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_create_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+
+ /* Free manually, as it was allocated outside of KUnit */
+ kfree(bo->ttm);
+}
+
+static void ttm_tt_create_invalid_bo_type(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_sg + 1;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+ KUNIT_EXPECT_NULL(test, bo->ttm);
+}
+
+static void ttm_tt_create_ttm_exists(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->ttm = tt;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ /* Expect to keep the previous TTM */
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
+}
+
+static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
+ u32 page_flags)
+{
+ return NULL;
+}
+
+static struct ttm_device_funcs ttm_dev_empty_funcs = {
+ .ttm_tt_create = ttm_tt_null_create,
+};
+
+static void ttm_tt_create_failed(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ /* Update ttm_device_funcs so we don't alloc ttm_tt */
+ devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -ENOMEM);
+}
+
+static void ttm_tt_destroy_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+
+ ttm_tt_destroy(devs->ttm_dev, bo->ttm);
+}
+
+static void ttm_tt_populate_null_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ int err;
+
+ err = ttm_tt_populate(devs->ttm_dev, NULL, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EINVAL);
+}
+
+static void ttm_tt_populate_populated_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct page *populated_page;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ populated_page = *tt->pages;
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_PTR_EQ(test, populated_page, *tt->pages);
+}
+
+static void ttm_tt_unpopulate_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ KUNIT_ASSERT_FALSE(test, ttm_tt_is_populated(tt));
+}
+
+static void ttm_tt_unpopulate_empty_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ /* Expect graceful handling of unpopulated TTs */
+}
+
+static void ttm_tt_swapin_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ int expected_num_pages = BO_SIZE >> PAGE_SHIFT;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err, num_pages;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);
+ KUNIT_ASSERT_NOT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_TRUE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+
+	/* Swapout depopulates the TT; allocate pages, then swap the contents back in */
+ err = ttm_pool_alloc(&devs->ttm_dev->pool, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_swapin(tt);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_FALSE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+}
+
+static struct kunit_case ttm_tt_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
+ KUNIT_CASE(ttm_tt_init_misaligned),
+ KUNIT_CASE(ttm_tt_fini_basic),
+ KUNIT_CASE(ttm_tt_fini_sg),
+ KUNIT_CASE(ttm_tt_fini_shmem),
+ KUNIT_CASE(ttm_tt_create_basic),
+ KUNIT_CASE(ttm_tt_create_invalid_bo_type),
+ KUNIT_CASE(ttm_tt_create_ttm_exists),
+ KUNIT_CASE(ttm_tt_create_failed),
+ KUNIT_CASE(ttm_tt_destroy_basic),
+ KUNIT_CASE(ttm_tt_populate_null_ttm),
+ KUNIT_CASE(ttm_tt_populate_populated_ttm),
+ KUNIT_CASE(ttm_tt_unpopulate_basic),
+ KUNIT_CASE(ttm_tt_unpopulate_empty_ttm),
+ KUNIT_CASE(ttm_tt_swapin_basic),
+ {}
+};
+
+static struct kunit_suite ttm_tt_test_suite = {
+ .name = "ttm_tt",
+ .init = ttm_test_devices_all_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_tt_test_cases,
+};
+
+kunit_test_suites(&ttm_tt_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_tt APIs");
+MODULE_LICENSE("GPL and additional rights");
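
The suites above are plain KUnit suites. Assuming the tests' .kunitconfig is used, they can typically be exercised with the standard KUnit wrapper; the path and flag below are the usual KUnit tooling, not something introduced by this patch:

./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests

Individual cases such as ttm_tt_swapin_basic then report under the "ttm_tt" suite name set in ttm_tt_test_suite above.
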
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index d27691f2e451..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
new file mode 100644
index 000000000000..32530c75f038
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+
+/*
+ * Need to map shmem indices to handle since a handle value
+ * of 0 means error, following the swp_entry_t convention.
+ */
+static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
+{
+ return (unsigned long)idx + 1;
+}
+
+static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
+{
+ return handle - 1;
+}
+
+/**
+ * ttm_backup_drop() - release memory associated with a handle
+ * @backup: The struct backup pointer used to obtain the handle
+ * @handle: The handle obtained from ttm_backup_backup_page().
+ */
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
+{
+ loff_t start = ttm_backup_handle_to_shmem_idx(handle);
+
+ start <<= PAGE_SHIFT;
+ shmem_truncate_range(file_inode(backup), start,
+ start + PAGE_SIZE - 1);
+}
+
+/**
+ * ttm_backup_copy_page() - Copy the contents of a previously backed
+ * up page
+ * @backup: The struct backup pointer used to back up the page.
+ * @dst: The struct page to copy into.
+ * @handle: The handle returned when the page was backed up.
+ * @intr: Whether waits should be interruptible or at least killable.
+ *
+ * Return: 0 on success, negative error code on failure; notably
+ * -EINTR if @intr was set to true and a signal is pending.
+ */
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+ pgoff_t handle, bool intr)
+{
+ struct address_space *mapping = backup->f_mapping;
+ struct folio *from_folio;
+ pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
+
+ from_folio = shmem_read_folio(mapping, idx);
+ if (IS_ERR(from_folio))
+ return PTR_ERR(from_folio);
+
+ copy_highpage(dst, folio_file_page(from_folio, idx));
+ folio_put(from_folio);
+
+ return 0;
+}
+
+/**
+ * ttm_backup_backup_page() - Backup a page
+ * @backup: The struct backup pointer to use.
+ * @page: The page to back up.
+ * @writeback: Whether to perform immediate writeback of the page.
+ * This may have performance implications.
+ * @idx: A unique integer for each page and each struct backup.
+ * This allows the backup implementation to avoid managing
+ * its address space separately.
+ * @page_gfp: The gfp value used when the page was allocated.
+ * This is used for accounting purposes.
+ * @alloc_gfp: The gfp to be used when allocating memory.
+ *
+ * Context: If called from reclaim context, the caller needs to
+ * assert that the shrinker gfp has __GFP_FS set, to avoid
+ * deadlocking on lock_page(). If @writeback is set to true and
+ * called from reclaim context, the caller also needs to assert
+ * that the shrinker gfp has __GFP_IO set, since without it,
+ * we're not allowed to start backup IO.
+ *
+ * Return: A handle on success. Negative error code on failure.
+ *
+ * Note: This function could be extended to back up a folio and
+ * implementations would then split the folio internally if needed.
+ * Drawback is that the caller would then have to keep track of
+ * the folio size and usage.
+ */
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp)
+{
+ struct address_space *mapping = backup->f_mapping;
+ unsigned long handle = 0;
+ struct folio *to_folio;
+ int ret;
+
+ to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
+ if (IS_ERR(to_folio))
+ return PTR_ERR(to_folio);
+
+ folio_mark_accessed(to_folio);
+ folio_lock(to_folio);
+ folio_mark_dirty(to_folio);
+ copy_highpage(folio_file_page(to_folio, idx), page);
+ handle = ttm_backup_shmem_idx_to_handle(idx);
+
+ if (writeback && !folio_mapped(to_folio) &&
+ folio_clear_dirty_for_io(to_folio)) {
+ folio_set_reclaim(to_folio);
+ ret = shmem_writeout(to_folio, NULL, NULL);
+ if (!folio_test_writeback(to_folio))
+ folio_clear_reclaim(to_folio);
+ /*
+		 * If writeout succeeds, it unlocks the folio. Errors
+ * are otherwise dropped, since writeout is only best
+ * effort here.
+ */
+ if (ret)
+ folio_unlock(to_folio);
+ } else {
+ folio_unlock(to_folio);
+ }
+
+ folio_put(to_folio);
+
+ return handle;
+}
+
+/**
+ * ttm_backup_fini() - Free the struct backup resources after last use.
+ * @backup: Pointer to the struct backup whose resources to free.
+ *
+ * After a call to this function, it's illegal to use the @backup pointer.
+ */
+void ttm_backup_fini(struct file *backup)
+{
+ fput(backup);
+}
+
+/**
+ * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
+ * left for backup.
+ *
+ * This function is also intended for driver use, to indicate whether
+ * a backup attempt is meaningful.
+ *
+ * Return: An approximate size of backup space available.
+ */
+u64 ttm_backup_bytes_avail(void)
+{
+ /*
+ * The idea behind backing up to shmem is that shmem objects may
+ * eventually be swapped out. So no point swapping out if there
+ * is no or low swap-space available. But the accuracy of this
+ * number also depends on shmem actually swapping out backed-up
+ * shmem objects without too much buffering.
+ */
+ return (u64)get_nr_swap_pages() << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
+
+/**
+ * ttm_backup_shmem_create() - Create a shmem-based struct backup.
+ * @size: The maximum size (in bytes) to back up.
+ *
+ * Create a backup utilizing shmem objects.
+ *
+ * Return: A pointer to a struct file on success,
+ * an error pointer on error.
+ */
+struct file *ttm_backup_shmem_create(loff_t size)
+{
+ return shmem_file_setup("ttm shmem backup", size, 0);
+}
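
Taken together, the helpers above form a small handle-based wrapper around a shmem file: create a backup object, back pages up by index, copy them back by handle, and drop or tear everything down. A minimal, hypothetical round-trip sketch follows; the wrapper function name, the 16-page size and the index value are illustrative only and not part of the patch:

#include <drm/ttm/ttm_backup.h>
#include <linux/err.h>
#include <linux/gfp.h>

/* Hypothetical sketch only: function name, size and index are made up. */
static int example_backup_roundtrip(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	struct file *backup;
	s64 handle;
	int ret;

	if (!page)
		return -ENOMEM;

	/* Room for 16 pages; the size is arbitrary for this sketch. */
	backup = ttm_backup_shmem_create(16 << PAGE_SHIFT);
	if (IS_ERR(backup)) {
		__free_page(page);
		return PTR_ERR(backup);
	}

	/* idx 0: the caller owns the index space of each struct backup. */
	handle = ttm_backup_backup_page(backup, page, false, 0,
					GFP_KERNEL, GFP_KERNEL);
	if (handle < 0) {
		ret = handle;
		goto out;
	}

	/* The page could be reused here; later, restore its contents. */
	ret = ttm_backup_copy_page(backup, page, handle, true);

	/* Release the shmem backing of this handle. */
	ttm_backup_drop(backup, handle);
out:
	__free_page(page);
	ttm_backup_fini(backup);
	return ret;
}
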
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 326a3d13a829..bd27607f8076 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,10 +31,13 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -42,14 +45,16 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct drm_printer p = drm_debug_printer(TTM_PFX);
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
struct ttm_resource_manager *man;
int i, mem_type;
@@ -84,6 +89,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
* ttm_bo_set_bulk_move - update BOs bulk move object
*
* @bo: The buffer object.
+ * @bulk: bulk move structure
*
* Update the BOs bulk move object, making sure that resources are added/removed
* as well. A bulk move allows to move many resource on the LRU at once,
@@ -120,8 +126,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bool old_use_tt, new_use_tt;
int ret;
- old_use_tt = bo->resource &&
- ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+ old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -139,7 +144,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
goto out_err;
}
@@ -224,82 +229,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- ttm_bo_put(bo);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -310,7 +239,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -345,8 +274,10 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
+ (want_init_on_free() && (bo->ttm != NULL)) ||
+ bo->type == ttm_bo_type_sg ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -371,7 +302,13 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&bo->bdev->lru_lock);
INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
- queue_work(bdev->wq, &bo->delayed_delete);
+
+ /* Schedule the worker on the closest NUMA node. This
+ * improves performance since system memory might be
+ * cleared on free and that is best done on a CPU core
+ * close to it.
+ */
+ queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
return;
}
@@ -383,21 +320,19 @@ static void ttm_bo_release(struct kref *kref)
bo->destroy(bo);
}
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
+/* TODO: remove! */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
-EXPORT_SYMBOL(ttm_bo_put);
+
+void ttm_bo_fini(struct ttm_buffer_object *bo)
+{
+ ttm_bo_put(bo);
+}
+EXPORT_SYMBOL(ttm_bo_fini);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
@@ -405,8 +340,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *hop_mem;
int ret;
- hop_placement.num_placement = hop_placement.num_busy_placement = 1;
- hop_placement.placement = hop_placement.busy_placement = hop;
+ hop_placement.num_placement = 1;
+ hop_placement.placement = hop;
/* find space in the bounce domain */
ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
@@ -435,10 +370,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
- placement.num_busy_placement = 0;
bdev->funcs->evict_flags(bo, &placement);
- if (!placement.num_placement && !placement.num_busy_placement) {
+ if (!placement.num_placement) {
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -460,17 +394,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
-bounce:
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (ret == -EMULTIHOP) {
- ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
- if (ret) {
+ do {
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret != -EMULTIHOP)
+ break;
+
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ } while (!ret);
+
+ if (ret) {
+ ttm_resource_free(bo, &evict_mem);
+ if (ret != -ERESTARTSYS && ret != -EINTR)
pr_err("Buffer eviction failed\n");
- ttm_resource_free(bo, &evict_mem);
- goto out;
- }
- /* try and move to final place now. */
- goto bounce;
}
out:
return ret;
@@ -501,143 +436,184 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
-
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
- } else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_cursor_init(&cursor, man);
+ res = ttm_resource_manager_first(&cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+
+ /** @limit_pool: Which pool limit we should test against */
+ struct dmem_cgroup_pool_state *limit_pool;
+ /** @try_low: Whether we should attempt to evict BO's with low watermark threshold */
+ bool try_low;
+ /** @hit_low: If we cannot evict a bo when @try_low is false (first pass) */
+ bool hit_low;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- if (!busy_bo || !ticket)
- return -EBUSY;
+ if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
+ evict_walk->try_low, &evict_walk->hit_low))
+ return 0;
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
+ }
- return r == -EDEADLK ? -EBUSY : r;
-}
+ if (lret)
+ goto out;
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res, NULL);
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ return lret;
+}
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state *limit_pool)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ .limit_pool = limit_pool,
+ };
+ s64 lret;
+
+ evict_walk.walk.arg.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+
+ /* One more attempt if we hit low limit? */
+ if (!lret && evict_walk.hit_low) {
+ evict_walk.try_low = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
}
+ if (lret || !ticket)
+ goto out;
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ /* Reset low limit */
+ evict_walk.try_low = evict_walk.hit_low = false;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.arg.trylock_only = false;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
+retry:
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.arg.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+
+ /* We hit the low limit? Try once more */
+ if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
+ evict_walk.try_low = true;
+ goto retry;
}
-
- spin_unlock(&bdev->lru_lock);
-
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
-
- ttm_bo_put(bo);
- return ret;
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -654,7 +630,8 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
spin_lock(&bo->bdev->lru_lock);
if (bo->resource)
ttm_resource_del_bulk_move(bo->resource, bo);
- ++bo->pin_count;
+ if (!bo->pin_count++ && bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
@@ -673,197 +650,157 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
return;
spin_lock(&bo->bdev->lru_lock);
- --bo->pin_count;
- if (bo->resource)
+ if (!--bo->pin_count && bo->resource) {
ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
/*
- * Add the last move fence to the BO as kernel dependency and reserve a new
- * fence slot.
+ * Add the pipelined eviction fences to the BO as a kernel dependency and reserve new
+ * fence slots.
*/
-static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
- struct ttm_resource_manager *man,
- struct ttm_resource *mem,
- bool no_wait_gpu)
+static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo,
+ struct ttm_resource_manager *man,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
- int ret;
+ int i;
- spin_lock(&man->move_lock);
- fence = dma_fence_get(man->move);
- spin_unlock(&man->move_lock);
-
- if (!fence)
- return 0;
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (!fence)
+ continue;
- if (no_wait_gpu) {
- ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
- dma_fence_put(fence);
- return ret;
+ if (no_wait_gpu) {
+ if (!dma_fence_is_signaled(fence)) {
+ spin_unlock(&man->eviction_lock);
+ return -EBUSY;
+ }
+ } else {
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
+ }
}
+ spin_unlock(&man->eviction_lock);
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
-
- ret = dma_resv_reserve_fences(bo->base.resv, 1);
- dma_fence_put(fence);
- return ret;
-}
-
-/*
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
- struct ww_acquire_ctx *ticket;
- int ret;
-
- man = ttm_manager_type(bdev, place->mem_type);
- ticket = dma_resv_locking_ctx(bo->base.resv);
- do {
- ret = ttm_resource_alloc(bo, place, mem);
- if (likely(!ret))
- break;
- if (unlikely(ret != -ENOSPC))
- return ret;
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret != 0))
- return ret;
- } while (1);
-
- return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
+ /* TODO: this call should be removed. */
+ return dma_resv_reserve_fences(bo->base.resv, 1);
}
/**
- * ttm_bo_mem_space
+ * ttm_bo_alloc_resource - Allocate backing store for a BO
*
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
* @ctx: if and how to sleep, lock buffers and alloc memory
+ * @force_space: If we should evict buffers to force space
+ * @res: The resulting struct ttm_resource.
*
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @placement, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
+ * Allocates a resource for the buffer object pointed to by @bo, using the
+ * placement flags in @placement, potentially evicting other buffer objects when
+ * @force_space is true.
+ * This function may sleep while waiting for resources to become available.
* Returns:
- * -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * -EBUSY: No space available (only if no_wait == true).
+ * -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
+static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx,
+ bool force_space,
+ struct ttm_resource **res)
{
struct ttm_device *bdev = bo->bdev;
- bool type_found = false;
+ struct ww_acquire_ctx *ticket;
int i, ret;
- ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ ticket = dma_resv_locking_ctx(bo->base.resv);
+ ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES);
if (unlikely(ret))
return ret;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct dmem_cgroup_pool_state *limit_pool = NULL;
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
- type_found = true;
- ret = ttm_resource_alloc(bo, place, mem);
- if (ret == -ENOSPC)
+ if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
continue;
- if (unlikely(ret))
- goto error;
- ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
+ if (ret) {
+ if (ret != -ENOSPC && ret != -EAGAIN) {
+ dmem_cgroup_pool_state_put(limit_pool);
+ return ret;
+ }
+ if (!may_evict) {
+ dmem_cgroup_pool_state_put(limit_pool);
+ continue;
+ }
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res, limit_pool);
+ dmem_cgroup_pool_state_put(limit_pool);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
+ return ret;
+ }
+
+ ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
- ttm_resource_free(bo, mem);
+ ttm_resource_free(bo, res);
if (ret == -EBUSY)
continue;
- goto error;
+ return ret;
}
return 0;
}
- for (i = 0; i < placement->num_busy_placement; ++i) {
- const struct ttm_place *place = &placement->busy_placement[i];
- struct ttm_resource_manager *man;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- continue;
-
- type_found = true;
- ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
- if (likely(!ret))
- return 0;
-
- if (ret && ret != -EBUSY)
- goto error;
- }
-
- ret = -ENOMEM;
- if (!type_found) {
- pr_err(TTM_PFX "No compatible memory type found\n");
- ret = -EINVAL;
- }
-
-error:
- return ret;
+ return -ENOSPC;
}
-EXPORT_SYMBOL(ttm_bo_mem_space);
-static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx)
+/*
+ * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
+ *
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
+ * @res: The resulting struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Tries both idle allocation and forceful eviction of buffers. See
+ * ttm_bo_alloc_resource for details.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **res,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_resource *mem;
- struct ttm_place hop;
+ bool force_space = false;
int ret;
- dma_resv_assert_held(bo->base.resv);
+ do {
+ ret = ttm_bo_alloc_resource(bo, placement, ctx,
+ force_space, res);
+ force_space = !force_space;
+ } while (ret == -ENOSPC && force_space);
- /*
- * Determine where to move the buffer.
- *
- * If driver determines move is going to need
- * an extra step then it will return -EMULTIHOP
- * and the buffer will be moved to the temporary
- * stop and the driver will be called to make
- * the second hop.
- */
- ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
- if (ret)
- return ret;
-bounce:
- ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
- if (ret == -EMULTIHOP) {
- ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
- if (ret)
- goto out;
- /* try and move to final place now. */
- goto bounce;
- }
-out:
- if (ret)
- ttm_resource_free(bo, &mem);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_mem_space);
/**
* ttm_bo_validate
@@ -884,6 +821,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *res;
+ struct ttm_place hop;
+ bool force_space;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -891,17 +831,58 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement)
+ if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
- /*
- * Check whether we need to move buffer.
- */
- if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
- ret = ttm_bo_move_buffer(bo, placement, ctx);
+ force_space = false;
+ do {
+ /* Check whether we need to move buffer. */
+ if (bo->resource &&
+ ttm_resource_compatible(bo->resource, placement,
+ force_space))
+ return 0;
+
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
+ /*
+ * Determine where to move the buffer.
+ *
+ * If driver determines move is going to need
+ * an extra step then it will return -EMULTIHOP
+ * and the buffer will be moved to the temporary
+ * stop and the driver will be called to make
+ * the second hop.
+ */
+ ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
+ &res);
+ force_space = !force_space;
+ if (ret == -ENOSPC)
+ continue;
if (ret)
return ret;
- }
+
+bounce:
+ ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
+ if (ret == -EMULTIHOP) {
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ /* try and move to final place now. */
+ if (!ret)
+ goto bounce;
+ }
+ if (ret) {
+ ttm_resource_free(bo, &res);
+ return ret;
+ }
+
+ } while (ret && force_space);
+
+ /* For backward compatibility with userspace */
+ if (ret == -ENOSPC)
+ return bo->bdev->alloc_flags & TTM_ALLOCATION_PROPAGATE_ENOSPC ?
+ ret : -ENOMEM;
+
/*
* We might need to add a TTM.
*/
@@ -953,7 +934,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
int ret;
kref_init(&bo->kref);
@@ -970,12 +950,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
bo->base.resv = &bo->base._resv;
atomic_inc(&ttm_glob.bo_count);
- ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
- if (unlikely(ret)) {
- ttm_bo_put(bo);
- return ret;
- }
-
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
@@ -1118,12 +1092,27 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+ /** @hit_low: Whether we should attempt to swap BO's with low watermark threshold */
+	/** @evict_low: If we cannot swap a bo when @hit_low is false (first pass) */
+ bool hit_low, evict_low;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1131,46 +1120,48 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
*/
if (bo->resource->mem_type != TTM_PL_SYSTEM) {
- struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource *evict_mem;
struct ttm_place hop;
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
- ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
+ if (ret)
goto out;
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
+ ttm_resource_free(bo, &evict_mem);
goto out;
}
}
@@ -1179,30 +1170,67 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
- if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ if (ttm_tt_is_populated(bo->ttm)) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
+ spin_lock(&bo->bdev->lru_lock);
+ if (ret)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of bytes to swap out.
+ *
+ * Return: The number of bytes actually swapped out, or negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
@@ -1214,3 +1242,62 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
+
+/**
+ * ttm_bo_populate() - Ensure that a buffer object has backing pages
+ * @bo: The buffer object
+ * @ctx: The ttm_operation_ctx governing the operation.
+ *
+ * For buffer objects in a memory type whose manager uses
+ * struct ttm_tt for backing pages, ensure those backing pages
+ * are present and with valid content. The bo's resource is also
+ * placed on the correct LRU list if it was previously swapped
+ * out.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
+ * is set to true.
+ */
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_tt *tt = bo->ttm;
+ bool swapped;
+ int ret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (!tt)
+ return 0;
+
+ swapped = ttm_tt_is_swapped(tt);
+ ret = ttm_tt_populate(bo->bdev, tt, ctx);
+ if (ret)
+ return ret;
+
+ if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
+ bo->resource) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_populate);
+
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = ttm_bo_populate(bo, ctx);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_setup_export);
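
On the caller side, the most visible change in this file is that ttm_bo_swapout() now walks a whole resource manager towards a byte target instead of operating on a single bo. A hypothetical sketch of how a driver might push system-domain buffers out to shmem is shown below; the helper name, the no_wait_gpu policy and the swap-space check are assumptions, only the TTM calls themselves come from this patch:

#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper; name and policy are not taken from this patch. */
static s64 example_swapout_system(struct ttm_device *bdev, s64 target)
{
	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
	struct ttm_resource_manager *man =
		ttm_manager_type(bdev, TTM_PL_SYSTEM);

	/* Little point in swapping out when no swap space is left. */
	if (!man || !ttm_backup_bytes_avail())
		return 0;

	/* Returns the progress made towards @target, or a negative errno. */
	return ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, target);
}
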
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..e0d48eac74b0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+#endif
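
A minimal sketch of the lookup pattern these helpers support, mirroring how ttm_bo_evict_first() and the LRU cursor code use them; the wrapper function is hypothetical and assumes it is built next to the TTM sources so that it can include this internal header:

#include <drm/ttm/ttm_resource.h>

#include "ttm_bo_internal.h"

/* Hypothetical wrapper illustrating the pattern; not part of the patch. */
static struct ttm_buffer_object *
example_res_to_bo_get(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo;

	/* The caller is assumed to hold the lock protecting @res, e.g. lru_lock. */
	bo = ttm_bo_get_unless_zero(res->bo);
	if (!bo)
		return NULL;	/* Already on its way to destruction; skip it. */

	/* Pairs with a later ttm_bo_put() once the caller is done with @bo. */
	return bo;
}
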
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7635d7d6b13b..2ff35d55e462 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,6 +29,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#include <linux/export.h>
+#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <drm/ttm/ttm_bo.h>
@@ -37,6 +39,8 @@
#include <drm/drm_cache.h>
+#include "ttm_bo_internal.h"
+
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
@@ -157,13 +161,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
- if (!src_mem)
- return 0;
+ if (WARN_ON(!src_mem))
+ return -EINVAL;
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
- ret = ttm_tt_populate(bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
@@ -254,6 +258,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
+ if (ret) {
+ dma_resv_unlock(&fbo->base.base._resv);
+ kfree(fbo);
+ return ret;
+ }
+
if (fbo->base.resource) {
ttm_resource_set_bo(fbo->base.resource, &fbo->base);
bo->resource = NULL;
@@ -262,12 +273,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.bulk_move = NULL;
}
- ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
- if (ret) {
- kfree(fbo);
- return ret;
- }
-
ttm_bo_get(bo);
fbo->bo = bo;
@@ -294,7 +299,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
enum ttm_caching caching;
man = ttm_manager_type(bo->bdev, res->mem_type);
- caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
+ }
return ttm_prot_from_caching(caching, tmp);
}
@@ -337,16 +348,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
pgprot_t prot;
int ret;
BUG_ON(!ttm);
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
- if (num_pages == 1 && ttm->caching == ttm_cached) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -369,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
}
/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only if you have
+ * made sure the bo is the one being displayed, so it is properly allocated
+ * and protected.
+ *
+ * Returns the vaddr, which you can use to write to the bo, and which you
+ * should pass to kunmap_local() when you're done with this page, or NULL if
+ * the bo is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
+
+/**
* ttm_bo_kmap
*
* @bo: The buffer object.
@@ -498,7 +538,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
pgprot_t prot;
void *vaddr;
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
@@ -606,20 +646,44 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
{
struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *from;
+ struct dma_fence *tmp;
+ int i;
from = ttm_manager_type(bdev, bo->resource->mem_type);
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
- * this eviction and free up the allocation
+ * this eviction and free up the allocation.
+ * The fence will be saved in the first free slot or in the slot
+ * already used to store a fence from the same context. Since
+ * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for
+	 * evictions, we should always find a slot to use.
*/
- spin_lock(&from->move_lock);
- if (!from->move || dma_fence_is_later(fence, from->move)) {
- dma_fence_put(from->move);
- from->move = dma_fence_get(fence);
+ spin_lock(&from->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ tmp = from->eviction_fences[i];
+ if (!tmp)
+ break;
+ if (fence->context != tmp->context)
+ continue;
+ if (dma_fence_is_later(fence, tmp)) {
+ dma_fence_put(tmp);
+ break;
+ }
+ goto unlock;
+ }
+ if (i < TTM_NUM_MOVE_FENCES) {
+ from->eviction_fences[i] = dma_fence_get(fence);
+ } else {
+ WARN(1, "not enough fence slots for all fence contexts");
+ spin_unlock(&from->eviction_lock);
+ dma_fence_wait(fence, false);
+ goto end;
}
- spin_unlock(&from->move_lock);
+unlock:
+ spin_unlock(&from->eviction_lock);
+end:
ttm_resource_free(bo, &bo->resource);
}
@@ -704,30 +768,23 @@ EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
- struct ttm_resource *sys_res;
struct ttm_tt *ttm;
int ret;
- ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
- if (ret)
- return ret;
-
/* If already idle, no need for ghost object dance. */
if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
if (ret)
- goto error_free_sys_mem;
+ return ret;
} else {
ttm_tt_unpopulate(bo->bdev, bo->ttm);
if (bo->type == ttm_bo_type_device)
ttm_tt_mark_for_clear(bo->ttm);
}
ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, sys_res);
return 0;
}
@@ -744,7 +801,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = ttm_tt_create(bo, true);
swap(bo->ttm, ttm);
if (ret)
- goto error_free_sys_mem;
+ return ret;
ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
@@ -760,13 +817,362 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
- ttm_bo_assign_mem(bo, sys_res);
return 0;
error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
+ return ret;
+}
+
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ curs->needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
+ else
+ ret = dma_resv_lock(resv, arg->ticket);
+
+ if (!ret) {
+ curs->needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+ * after waiting for the ticketlock, revert back to
+ * trylocking for this walk.
+ */
+ arg->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
-error_free_sys_mem:
- ttm_resource_free(bo, &sys_res);
return ret;
}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describe the walks and actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
+ * the corresponding ttm_buffer_object is locked and a reference is taken on it, and
+ * the LRU lock is dropped. The LRU lock may be dropped before locking and, in
+ * that case, it's verified that the item actually remains on the LRU list after
+ * the lock, and that the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is up to the caller; it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that the way dma_resv individualization is done, locking needs to be done
+ * either with the LRU lock held (trylocking only) or with a reference on the
+ * object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
+ s64 progress = 0;
+ s64 lret;
+
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return progress;
+}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
+ *
+ * Initialize a struct ttm_bo_lru_cursor.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->arg = arg;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ spin_lock(lru_lock);
+ }
+
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock a buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
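+
+/*
+ * Illustrative sketch only, not part of this change: walking a manager's LRU
+ * lists with the cursor API above. Each @bo returned is locked and
+ * referenced; the next/fini calls drop the previous object again. The
+ * ttm_lru_walk_arg field names are assumed from their use in this file.
+ */
+static unsigned long __maybe_unused
+example_count_lru_bos(struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_lru_walk_arg arg = { .ctx = ctx, .trylock_only = true };
+ struct ttm_bo_lru_cursor curs;
+ struct ttm_buffer_object *bo;
+ unsigned long count = 0;
+
+ ttm_bo_lru_cursor_init(&curs, man, &arg);
+ for (bo = ttm_bo_lru_cursor_first(&curs); bo && !IS_ERR(bo);
+ bo = ttm_bo_lru_cursor_next(&curs))
+ count++;
+ ttm_bo_lru_cursor_fini(&curs);
+
+ return count;
+}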
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system memory, it is first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable, false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular waiting
+ * for the gpu) should be avoided since it may stall a system that could
+ * otherwise make progress by shrinking something else that is less time
+ * consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
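+
+/*
+ * Illustrative sketch only, not part of this change: how the three helpers
+ * above are intended to combine in a driver shrinker path. The flag choice
+ * (writeback, allow_move, no purge) is just an example.
+ */
+static long __maybe_unused example_try_shrink(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ const struct ttm_bo_shrink_flags flags = {
+ .purge = false,
+ .writeback = true,
+ .allow_move = true,
+ };
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (!ttm_bo_shrink_suitable(bo, ctx))
+ return 0;
+
+ /* In direct reclaim, avoid stalling on the GPU. */
+ if (ttm_bo_shrink_avoid_wait())
+ ctx->no_wait_gpu = true;
+
+ return ttm_bo_shrink(ctx, bo, flags);
+}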
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ecda6db24b8..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/export.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
@@ -58,13 +60,13 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
return VM_FAULT_RETRY;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
(void)dma_resv_wait_timeout(bo->base.resv,
DMA_RESV_USAGE_KERNEL, true,
MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
return VM_FAULT_RETRY;
}
@@ -130,12 +132,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
}
return VM_FAULT_RETRY;
@@ -218,14 +220,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
prot = ttm_io_prot(bo, bo->resource, prot);
if (!bo->resource->bus.is_iomem) {
struct ttm_operation_ctx ctx = {
- .interruptible = false,
+ .interruptible = true,
.no_wait_gpu = false,
- .force_alloc = true
};
ttm = bo->ttm;
- if (ttm_tt_populate(bdev, bo->ttm, &ctx))
- return VM_FAULT_OOM;
+ err = ttm_bo_populate(bo, &ctx);
+ if (err) {
+ if (err == -EINTR || err == -ERESTARTSYS ||
+ err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+
+ pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
+ return VM_FAULT_SIGBUS;
+ }
} else {
/* Iomem should not be marked encrypted */
prot = pgprot_decrypted(prot);
@@ -254,7 +262,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* encryption bits. This is because the exact location of the
* data may not be known at mmap() time and may also change
* at arbitrary times while the data is mmap'ed.
- * See vmf_insert_mixed_prot() for a discussion.
+ * See vmf_insert_pfn_prot() for a discussion.
*/
ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
@@ -346,7 +354,7 @@ void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
}
EXPORT_SYMBOL(ttm_bo_vm_open);
@@ -354,7 +362,7 @@ void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
@@ -398,13 +406,25 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when the buffer object
+ * cannot easily be mapped (non-contiguous, non-visible, etc.). Should not
+ * directly be exported to user space via a peek / poke interface.
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write)
{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- unsigned long offset = (addr) - vma->vm_start +
- ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
- << PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) > bo->base.size)
@@ -422,8 +442,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
break;
default:
if (bo->bdev->funcs->access_memory)
- ret = bo->bdev->funcs->access_memory(
- bo, offset, buf, len, write);
+ ret = bo->bdev->funcs->access_memory
+ (bo, offset, buf, len, write);
else
ret = -EIO;
}
@@ -432,6 +452,18 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_access);
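+
+/*
+ * Illustrative sketch only, not part of this change: reading a small value
+ * out of a buffer object with ttm_bo_access(). Note that the function
+ * returns the number of bytes transferred on success.
+ */
+static int __maybe_unused example_peek_u32(struct ttm_buffer_object *bo,
+ unsigned long offset, u32 *val)
+{
+ int ret = ttm_bo_access(bo, offset, val, sizeof(*val), 0);
+
+ if (ret < 0)
+ return ret;
+
+ return ret == sizeof(*val) ? 0 : -EIO;
+}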
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
+
+ return ttm_bo_access(bo, offset, buf, len, write);
+}
EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
@@ -455,7 +487,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
/*
* Drivers may want to override the vm_ops field. Otherwise we
@@ -471,8 +503,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_private_data = bo;
- vma->vm_flags |= VM_PFNMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index c7a1862f322a..9a51afaf0749 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -27,14 +27,18 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
+#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/mm.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
/*
* ttm_global_mutex - protecting the global state
@@ -95,11 +99,17 @@ static int ttm_global_init(void)
ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32);
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+ __GFP_NOWARN);
+ /* Retry without GFP_DMA32 on platforms where DMA32 is not available */
if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out;
+ glob->dummy_read_page = alloc_page(__GFP_ZERO);
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
INIT_LIST_HEAD(&glob->device_list);
@@ -116,6 +126,28 @@ out:
return ret;
}
+/**
+ * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
+ *
+ * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
+ *
+ * Return: 0 on success, negative number on failure.
+ */
+int ttm_device_prepare_hibernation(struct ttm_device *bdev)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ int ret;
+
+ do {
+ ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
+ } while (ret > 0);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_device_prepare_hibernation);
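+
+/*
+ * Illustrative sketch only, not part of this change: a driver would
+ * typically call this from its hibernation/freeze path, presumably after
+ * quiescing the device, and propagate any error to the PM core.
+ */
+static int __maybe_unused example_pm_freeze(struct ttm_device *bdev)
+{
+ return ttm_device_prepare_hibernation(bdev);
+}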
+
/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
@@ -137,40 +169,24 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
mutex_unlock(&ttm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(ttm_global_swapout);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -183,20 +199,19 @@ EXPORT_SYMBOL(ttm_device_swapout);
* @dev: The core kernel device pointer for DMA mappings and allocations.
* @mapping: The address space to use for this bo.
* @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
+ * @alloc_flags: TTM_ALLOCATION_* flags.
*
* Initializes a struct ttm_device:
* Returns:
* !0: Failure.
*/
-int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32)
+ unsigned int alloc_flags)
{
struct ttm_global *glob = &ttm_glob;
- int ret;
+ int ret, nid;
if (WARN_ON(vma_manager == NULL))
return -EINVAL;
@@ -205,20 +220,28 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
if (ret)
return ret;
- bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ bdev->wq = alloc_workqueue("ttm",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
if (!bdev->wq) {
ttm_global_release();
return -ENOMEM;
}
+ bdev->alloc_flags = alloc_flags;
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+
+ if (dev)
+ nid = dev_to_node(dev);
+ else
+ nid = NUMA_NO_NODE;
+
+ ttm_pool_init(&bdev->pool, dev, nid, alloc_flags);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
- INIT_LIST_HEAD(&bdev->pinned);
+ INIT_LIST_HEAD(&bdev->unevictable);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -233,10 +256,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
- man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
- ttm_resource_manager_set_used(man, false);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@@ -244,6 +263,10 @@ void ttm_device_fini(struct ttm_device *bdev)
drain_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
@@ -261,14 +284,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
@@ -285,7 +308,7 @@ void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned int i, j;
- ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
+ ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index f1c60fa80c2d..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index b3fffe7b5062..aa137ead5cc5 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -74,7 +74,8 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__) || \
+ defined(__riscv)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index aa116a7bbae3..18b6db015619 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -31,6 +31,7 @@
* cause they are rather slow compared to alloc_pages+map.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -41,11 +42,20 @@
#include <asm/set_memory.h>
#endif
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
+#include "ttm_pool_internal.h"
+
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
@@ -58,6 +68,52 @@ struct ttm_pool_dma {
unsigned long vaddr;
};
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+ struct page **pages;
+ struct page **caching_divide;
+ dma_addr_t *dma_addr;
+ pgoff_t remaining_pages;
+ enum ttm_caching tt_caching;
+};
+
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The caching requested by the struct ttm_tt.
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+ struct ttm_pool *pool;
+ struct ttm_pool_alloc_state snapshot_alloc;
+ struct page *alloced_page;
+ dma_addr_t first_dma;
+ pgoff_t alloced_pages;
+ pgoff_t restored_pages;
+ enum ttm_caching page_caching;
+ unsigned int order;
+};
+
static unsigned long page_pool_size;
MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
@@ -65,20 +121,22 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_uncached[MAX_ORDER];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
-static struct shrinker mm_shrinker;
+static struct shrinker *mm_shrinker;
+static DECLARE_RWSEM(pool_shrink_rwsem);
/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
unsigned int order)
{
+ const unsigned int beneficial_order = ttm_pool_beneficial_order(pool);
unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
struct ttm_pool_dma *dma;
struct page *p;
@@ -90,10 +148,17 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
+
+ /*
+ * Do not add latency to the allocation path for allocation orders that
+ * the device told us bring no additional performance gains.
+ */
+ if (beneficial_order && order > beneficial_order)
+ gfp_flags &= ~__GFP_DIRECT_RECLAIM;
- if (!pool->use_dma_alloc) {
- p = alloc_pages(gfp_flags, order);
+ if (!ttm_pool_uses_dma_alloc(pool)) {
+ p = alloc_pages_node(pool->nid, gfp_flags, order);
if (p)
p->private = order;
return p;
@@ -144,7 +209,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
set_pages_wb(p, 1 << order);
#endif
- if (!pool || !pool->use_dma_alloc) {
+ if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
__free_pages(p, order);
return;
}
@@ -159,36 +224,35 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
kfree(dma);
}
-/* Apply a new caching to an array of pages */
-static int ttm_pool_apply_caching(struct page **first, struct page **last,
- enum ttm_caching caching)
+/* Apply any cpu-caching deferred during page allocation */
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
- unsigned int num_pages = last - first;
+ unsigned int num_pages = alloc->pages - alloc->caching_divide;
if (!num_pages)
return 0;
- switch (caching) {
+ switch (alloc->tt_caching) {
case ttm_cached:
break;
case ttm_write_combined:
- return set_pages_array_wc(first, num_pages);
+ return set_pages_array_wc(alloc->caching_divide, num_pages);
case ttm_uncached:
- return set_pages_array_uc(first, num_pages);
+ return set_pages_array_uc(alloc->caching_divide, num_pages);
}
#endif
+ alloc->caching_divide = alloc->pages;
return 0;
}
-/* Map pages of 1 << order size and fill the DMA address array */
+/* DMA Map pages of 1 << order size and return the resulting dma_address. */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr)
+ struct page *p, dma_addr_t *dma_addr)
{
dma_addr_t addr;
- unsigned int i;
- if (pool->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
struct ttm_pool_dma *dma = (void *)p->private;
addr = dma->addr;
@@ -200,10 +264,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
return -EFAULT;
}
- for (i = 1 << order; i ; --i) {
- *(*dma_addr)++ = addr;
- addr += PAGE_SIZE;
- }
+ *dma_addr = addr;
return 0;
}
@@ -213,7 +274,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
unsigned int num_pages)
{
/* Unmapped while freeing the page */
- if (pool->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
return;
dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
@@ -287,18 +348,24 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
- if (pool->use_dma32)
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
+ if (ttm_pool_uses_dma32(pool))
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
- if (pool->use_dma32)
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
+ if (ttm_pool_uses_dma32(pool))
return &global_dma32_uncached[order];
return &global_uncached[order];
@@ -317,6 +384,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_pages;
struct page *p;
+ down_read(&pool_shrink_rwsem);
spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
list_move_tail(&pt->shrinker_list, &shrinker_list);
@@ -329,6 +397,7 @@ static unsigned int ttm_pool_shrink(void)
} else {
num_pages = 0;
}
+ up_read(&pool_shrink_rwsem);
return num_pages;
}
@@ -336,7 +405,7 @@ static unsigned int ttm_pool_shrink(void)
/* Return the allocation order based for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
- if (pool->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
struct ttm_pool_dma *dma = (void *)p->private;
return dma->vaddr & ~PAGE_MASK;
@@ -345,54 +414,313 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
-/* Called when we got a page, either from a pool or newly allocated */
+/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+ unsigned int order = ttm_pool_page_order(pool, p);
+ pgoff_t nr;
+
+ if (!order)
+ return;
+
+ split_page(p, order);
+ nr = 1UL << order;
+ while (nr--)
+ (p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely due to lack of swap-space or memory, the latter due
+ * to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt page vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in and then the old pages
+ * are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
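+
+/*
+ * Illustrative sketch only, not part of this change: how a ttm_tt page-vector
+ * entry of a (partially) backed-up tt can be interpreted. That a handle value
+ * of 0 means "already restored / nothing backed up" is inferred from the code
+ * below.
+ */
+static bool __maybe_unused example_entry_is_backed_up(struct ttm_tt *tt,
+ pgoff_t index)
+{
+ struct page *p = tt->pages[index];
+
+ return ttm_backup_page_ptr_is_handle(p) &&
+ ttm_backup_page_ptr_to_handle(p) != 0;
+}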
+
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+ return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+ const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+ struct ttm_pool_type *pt = NULL;
+ unsigned int order;
+ pgoff_t nr;
+
+ if (pool) {
+ order = ttm_pool_page_order(pool, page);
+ nr = (1UL << order);
+ if (dma_addr)
+ ttm_pool_unmap(pool, *dma_addr, nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ } else {
+ order = page->private;
+ nr = (1UL << order);
+ }
+
+ if (pt)
+ ttm_pool_type_give(pt, page);
+ else
+ ttm_pool_free_page(pool, caching, order, page);
+
+ return nr;
+}
+
+/* Populate the page-array using the most recent allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+ dma_addr_t first_dma,
+ struct ttm_pool_alloc_state *alloc,
+ pgoff_t nr)
+{
+ pgoff_t i;
+
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = allocated++;
+
+ alloc->remaining_pages -= nr;
+
+ if (!alloc->dma_addr)
+ return;
+
+ for (i = 0; i < nr; ++i) {
+ *alloc->dma_addr++ = first_dma;
+ first_dma += PAGE_SIZE;
+ }
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+ struct file *backup,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc)
+
+{
+ pgoff_t i, nr = 1UL << restore->order;
+ struct page **first_page = alloc->pages;
+ struct page *p;
+ int ret = 0;
+
+ for (i = restore->restored_pages; i < nr; ++i) {
+ p = first_page[i];
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+ should_fail(&backup_fault_inject, 1)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (handle == 0) {
+ restore->restored_pages++;
+ continue;
+ }
+
+ ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+ handle, ctx->interruptible);
+ if (ret)
+ break;
+
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ /*
+ * We could probably avoid splitting the old page
+ * using clever logic, but ATM we don't care, as
+ * we prioritize releasing memory ASAP. Note that
+ * here, the old retained page is always write-back
+ * cached.
+ */
+ ttm_pool_split_for_swap(restore->pool, p);
+ copy_highpage(restore->alloced_page + i, p);
+ __free_pages(p, 0);
+ }
+
+ restore->restored_pages++;
+ first_page[i] = ttm_backup_handle_to_page_ptr(0);
+ }
+
+ if (ret) {
+ if (!restore->restored_pages) {
+ dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = nr;
+ }
+ return ret;
+ }
+
+ ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+ alloc, nr);
+ if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+ alloc->caching_divide = alloc->pages;
+ restore->snapshot_alloc = *alloc;
+ restore->alloced_pages += nr;
+
+ return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+ struct page *p,
+ enum ttm_caching page_caching,
+ dma_addr_t first_dma,
+ struct ttm_pool_tt_restore *restore,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ restore->pool = pool;
+ restore->order = order;
+ restore->restored_pages = 0;
+ restore->page_caching = page_caching;
+ restore->first_dma = first_dma;
+ restore->alloced_page = p;
+ restore->snapshot_alloc = *alloc;
+}
+
+/*
+ * Called when we got a page, either from a pool or newly allocated.
+ * If needed, dma map the page and populate the dma address array.
+ * Populate the page address array.
+ * If the caching is consistent, update any deferred caching. Otherwise
+ * stage this page for an upcoming deferred caching update.
+ */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
- struct page *p, dma_addr_t **dma_addr,
- unsigned long *num_pages,
- struct page ***pages)
+ struct page *p, enum ttm_caching page_caching,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- unsigned int i;
- int r;
+ bool caching_consistent;
+ dma_addr_t first_dma;
+ int r = 0;
+
+ caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
+
+ if (caching_consistent) {
+ r = ttm_pool_apply_caching(alloc);
+ if (r)
+ return r;
+ }
- if (*dma_addr) {
- r = ttm_pool_map(pool, order, p, dma_addr);
+ if (alloc->dma_addr) {
+ r = ttm_pool_map(pool, order, p, &first_dma);
if (r)
return r;
}
- *num_pages -= 1 << order;
- for (i = 1 << order; i; --i, ++(*pages), ++p)
- **pages = p;
+ if (restore) {
+ ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+ first_dma, restore, alloc);
+ } else {
+ ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
+
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
+ }
return 0;
}
/**
- * ttm_pool_alloc - Fill a ttm_tt object
- *
- * @pool: ttm_pool to use
- * @tt: ttm_tt object to fill
- * @ctx: operation context
- *
- * Fill the ttm_tt object with pages and also make sure to DMA map them when
- * necessary.
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
*
- * Returns: 0 on successe, negative error code otherwise.
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if the allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
*/
-int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx)
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+{
+ struct page **pages = &tt->pages[start_page];
+ struct file *backup = tt->backup;
+ pgoff_t i, nr;
+
+ for (i = start_page; i < end_page; i += nr, pages += nr) {
+ struct page *p = *pages;
+
+ nr = 1;
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (handle != 0)
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ dma_addr_t *dma_addr = tt->dma_address ?
+ tt->dma_address + i : NULL;
+
+ nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+ }
+ }
+}
+
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+ struct ttm_pool_alloc_state *alloc)
+{
+ alloc->pages = tt->pages;
+ alloc->caching_divide = tt->pages;
+ alloc->dma_addr = tt->dma_address;
+ alloc->remaining_pages = tt->num_pages;
+ alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and number of remaining pages
+ */
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
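+
+/*
+ * Worked example (assuming MAX_PAGE_ORDER == 10, the common default):
+ * with 300 pages remaining, __fls(300) == 8, so order min(10, 8) == 8
+ * (256 pages) is tried first; the next round then has 44 pages left and
+ * tries order min(8, __fls(44)) == 5 (32 pages), and so on.
+ */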
+
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
{
- unsigned long num_pages = tt->num_pages;
- dma_addr_t *dma_addr = tt->dma_address;
- struct page **caching = tt->pages;
- struct page **pages = tt->pages;
+ enum ttm_caching page_caching;
gfp_t gfp_flags = GFP_USER;
- unsigned int i, order;
+ pgoff_t caching_divide;
+ unsigned int order;
+ bool allow_pools;
struct page *p;
int r;
- WARN_ON(!num_pages || ttm_tt_is_populated(tt));
- WARN_ON(dma_addr && !pool->dev);
+ WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc->dma_addr && !pool->dev);
if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
@@ -400,89 +728,160 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
if (ctx->gfp_retry_mayfail)
gfp_flags |= __GFP_RETRY_MAYFAIL;
- if (pool->use_dma32)
+ if (ttm_pool_uses_dma32(pool))
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
- num_pages;
- order = min_t(unsigned int, order, __fls(num_pages))) {
+ page_caching = tt->caching;
+ allow_pools = true;
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+ alloc->remaining_pages;
+ order = ttm_pool_alloc_find_order(order, alloc)) {
struct ttm_pool_type *pt;
- pt = ttm_pool_select_type(pool, tt->caching, order);
- p = pt ? ttm_pool_type_take(pt) : NULL;
- if (p) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
-
- do {
- r = ttm_pool_page_allocated(pool, order, p,
- &dma_addr,
- &num_pages,
- &pages);
- if (r)
- goto error_free_page;
-
- if (num_pages < (1 << order))
- break;
-
- p = ttm_pool_type_take(pt);
- } while (p);
- caching = pages;
- }
-
- while (num_pages >= (1 << order) &&
- (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
-
- if (PageHighMem(p)) {
- r = ttm_pool_apply_caching(caching, pages,
- tt->caching);
- if (r)
- goto error_free_page;
- }
- r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
- &num_pages, &pages);
- if (r)
- goto error_free_page;
- if (PageHighMem(p))
- caching = pages;
+ /* First, try to allocate a page from a pool if one exists. */
+ p = NULL;
+ pt = ttm_pool_select_type(pool, page_caching, order);
+ if (pt && allow_pools)
+ p = ttm_pool_type_take(pt);
+ /*
+ * If that fails or previously failed, allocate from system.
+ * Note that this also disallows additional pool allocations using
+ * write-back cached pools of the same order. Consider removing
+ * that behaviour.
+ */
+ if (!p) {
+ page_caching = ttm_cached;
+ allow_pools = false;
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
}
-
+ /* If that fails, lower the order if possible and retry. */
if (!p) {
if (order) {
--order;
+ page_caching = tt->caching;
+ allow_pools = true;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+ restore);
+ if (r)
+ goto error_free_page;
+
+ if (ttm_pool_restore_valid(restore)) {
+ r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+ if (r)
+ goto error_free_all;
+ }
}
- r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ r = ttm_pool_apply_caching(alloc);
if (r)
goto error_free_all;
+ kfree(tt->restore);
+ tt->restore = NULL;
+
return 0;
error_free_page:
- ttm_pool_free_page(pool, tt->caching, order, p);
+ ttm_pool_free_page(pool, page_caching, order, p);
error_free_all:
- num_pages = tt->num_pages - num_pages;
- for (i = 0; i < num_pages; ) {
- order = ttm_pool_page_order(pool, tt->pages[i]);
- ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
- i += 1 << order;
- }
+ if (tt->restore)
+ return r;
+
+ caching_divide = alloc->caching_divide - tt->pages;
+ ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+ tt->num_pages - alloc->remaining_pages);
return r;
}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
EXPORT_SYMBOL(ttm_pool_alloc);
/**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if (!tt->restore) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+ if (ctx->gfp_retry_mayfail)
+ gfp |= __GFP_RETRY_MAYFAIL;
+
+ tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+ if (!tt->restore)
+ return -ENOMEM;
+
+ tt->restore->snapshot_alloc = alloc;
+ tt->restore->pool = pool;
+ tt->restore->restored_pages = 1;
+ } else {
+ struct ttm_pool_tt_restore *restore = tt->restore;
+ int ret;
+
+ alloc = restore->snapshot_alloc;
+ if (ttm_pool_restore_valid(tt->restore)) {
+ ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ret)
+ return ret;
+ }
+ if (!alloc.remaining_pages)
+ return 0;
+ }
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
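+
+/*
+ * Illustrative sketch only, not part of this change: choosing between the
+ * plain allocation path and the restore path, similar to what the ttm_tt
+ * population code is expected to do.
+ */
+static int __maybe_unused example_populate_or_restore(struct ttm_pool *pool,
+ struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ if (ttm_tt_is_backed_up(tt))
+ return ttm_pool_restore_and_alloc(pool, tt, ctx);
+
+ return ttm_pool_alloc(pool, tt, ctx);
+}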
+
+/**
* ttm_pool_free - Free the backing pages from a ttm_tt object
*
* @pool: Pool to give pages back to.
@@ -492,61 +891,223 @@ EXPORT_SYMBOL(ttm_pool_alloc);
*/
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
- unsigned int i;
+ ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
- for (i = 0; i < tt->num_pages; ) {
- struct page *p = tt->pages[i];
- unsigned int order, num_pages;
- struct ttm_pool_type *pt;
+ while (atomic_long_read(&allocated_pages) > page_pool_size)
+ ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
- order = ttm_pool_page_order(pool, p);
- num_pages = 1ULL << order;
- if (tt->dma_address)
- ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+/**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+ struct ttm_pool_tt_restore *restore;
+ pgoff_t start_page = 0;
- pt = ttm_pool_select_type(pool, tt->caching, order);
- if (pt)
- ttm_pool_type_give(pt, tt->pages[i]);
- else
- ttm_pool_free_page(pool, tt->caching, order,
- tt->pages[i]);
+ WARN_ON(!ttm_tt_is_backed_up(tt));
+
+ restore = tt->restore;
+
+ /*
+ * Unmap and free any uncommitted restore page.
+ * Any tt page-array backup entries that were already read back have
+ * been cleared at this point.
+ */
+ if (ttm_pool_restore_valid(restore)) {
+ dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
- i += num_pages;
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = 1UL << restore->order;
}
- while (atomic_long_read(&allocated_pages) > page_pool_size)
- ttm_pool_shrink();
+ /*
+ * If a restore is ongoing, part of the tt pages may have a
+ * caching different than writeback.
+ */
+ if (restore) {
+ pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+ start_page = restore->alloced_pages;
+ WARN_ON(mid > start_page);
+ /* Pages that might be dma-mapped and non-cached */
+ ttm_pool_free_range(restore->pool, tt, tt->caching,
+ 0, mid);
+ /* Pages that might be dma-mapped but cached */
+ ttm_pool_free_range(restore->pool, tt, ttm_cached,
+ mid, restore->alloced_pages);
+ kfree(restore);
+ tt->restore = NULL;
+ }
+
+ ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If @purge is true, then
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If @purge is false the pages will be backed up instead,
+ * exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
+ * a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup failed.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on error.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_backup_flags *flags)
+{
+ struct file *backup = tt->backup;
+ struct page *page;
+ unsigned long handle;
+ gfp_t alloc_gfp;
+ gfp_t gfp;
+ int ret = 0;
+ pgoff_t shrunken = 0;
+ pgoff_t i, num_pages;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+ ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
+ return -EBUSY;
+
+#ifdef CONFIG_X86
+ /* Anything returned to the system needs to be cached. */
+ if (tt->caching != ttm_cached)
+ set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+ if (tt->dma_address || flags->purge) {
+ for (i = 0; i < tt->num_pages; i += num_pages) {
+ unsigned int order;
+
+ page = tt->pages[i];
+ if (unlikely(!page)) {
+ num_pages = 1;
+ continue;
+ }
+
+ order = ttm_pool_page_order(pool, page);
+ num_pages = 1UL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i],
+ num_pages);
+ if (flags->purge) {
+ shrunken += num_pages;
+ page->private = 0;
+ __free_pages(page, order);
+ memset(tt->pages + i, 0,
+ num_pages * sizeof(*tt->pages));
+ }
+ }
+ }
+
+ if (flags->purge)
+ return shrunken;
+
+ if (ttm_pool_uses_dma32(pool))
+ gfp = GFP_DMA32;
+ else
+ gfp = GFP_HIGHUSER;
+
+ alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+ num_pages = tt->num_pages;
+
+ /* Simulate a fault by backing up only half of the pages. */
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+ num_pages = DIV_ROUND_UP(num_pages, 2);
+
+ for (i = 0; i < num_pages; ++i) {
+ s64 shandle;
+
+ page = tt->pages[i];
+ if (unlikely(!page))
+ continue;
+
+ ttm_pool_split_for_swap(pool, page);
+
+ shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
+ gfp, alloc_gfp);
+ if (shandle < 0) {
+ /* We allow partially shrunken tts */
+ ret = shandle;
+ break;
+ }
+ handle = shandle;
+ tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+ put_page(page);
+ shrunken++;
+ }
+
+ return shrunken ? shrunken : ret;
}
-EXPORT_SYMBOL(ttm_pool_free);
/**
* ttm_pool_init - Initialize a pool
*
* @pool: the pool to initialize
* @dev: device for DMA allocations and mappings
- * @use_dma_alloc: true if coherent DMA alloc should be used
- * @use_dma32: true if GFP_DMA32 should be used
+ * @nid: NUMA node to use for allocations
+ * @alloc_flags: TTM_ALLOCATION_POOL_* flags
*
* Initialize the pool and its pool types.
*/
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- bool use_dma_alloc, bool use_dma32)
+ int nid, unsigned int alloc_flags)
{
unsigned int i, j;
- WARN_ON(!dev && use_dma_alloc);
+ WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));
pool->dev = dev;
- pool->use_dma_alloc = use_dma_alloc;
- pool->use_dma32 = use_dma32;
-
- if (use_dma_alloc) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ pool->nid = nid;
+ pool->alloc_flags = alloc_flags;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
+EXPORT_SYMBOL(ttm_pool_init);
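+
+/*
+ * Illustrative sketch only, not part of this change: initializing a pool
+ * with the new NUMA-node and allocation-flags arguments. The flag shown is
+ * taken from ttm_pool_internal.h; other TTM_ALLOCATION_POOL_* flags may
+ * apply depending on the device.
+ */
+static void __maybe_unused example_pool_init(struct ttm_pool *pool,
+ struct device *dev)
+{
+ ttm_pool_init(pool, dev, dev_to_node(dev),
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+}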
+
+/**
+ * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is useful to guarantee that all shrinker invocations have seen an
+ * update before memory is freed, similar to RCU.
+ */
+static void ttm_pool_synchronize_shrinkers(void)
+{
+ down_write(&pool_shrink_rwsem);
+ up_write(&pool_shrink_rwsem);
+}
/**
* ttm_pool_fini - Cleanup a pool
@@ -560,19 +1121,28 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
* that no shrinker is concurrently freeing pages from the pool.
*/
- synchronize_shrinkers();
+ ttm_pool_synchronize_shrinkers();
}
+EXPORT_SYMBOL(ttm_pool_fini);
+
+/* Free average pool number of pages. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
-/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -580,9 +1150,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do
num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
- return num_freed;
+ return num_freed ?: SHRINK_STOP;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
@@ -616,7 +1189,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i < MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -627,7 +1200,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i < MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -673,7 +1246,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc) {
+ if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -682,7 +1255,12 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- seq_puts(m, "DMA ");
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (ttm_pool_uses_dma_alloc(pool))
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
@@ -706,11 +1284,16 @@ EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
- seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
- ttm_pool_shrinker_scan(&mm_shrinker, &sc));
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
+ ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
return 0;
@@ -736,7 +1319,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i < MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -752,12 +1335,24 @@ int ttm_pool_mgr_init(unsigned long num_pages)
&ttm_pool_debugfs_globals_fops);
debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+ &backup_fault_inject);
+#endif
#endif
- mm_shrinker.count_objects = ttm_pool_shrinker_count;
- mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
- mm_shrinker.seeks = 1;
- return register_shrinker(&mm_shrinker, "drm-ttm_pool");
+ mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+ if (!mm_shrinker)
+ return -ENOMEM;
+
+ mm_shrinker->count_objects = ttm_pool_shrinker_count;
+ mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
+ mm_shrinker->seeks = 1;
+
+ shrinker_register(mm_shrinker);
+
+ return 0;
}
/**
@@ -769,7 +1364,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i < MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
@@ -777,6 +1372,6 @@ void ttm_pool_mgr_fini(void)
ttm_pool_type_fini(&global_dma32_uncached[i]);
}
- unregister_shrinker(&mm_shrinker);
+ shrinker_free(mm_shrinker);
WARN_ON(!list_empty(&shrinker_list));
}
diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
new file mode 100644
index 000000000000..82c4b7e56a99
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_POOL_INTERNAL_H_
+#define _TTM_POOL_INTERNAL_H_
+
+#include <drm/ttm/ttm_allocation.h>
+#include <drm/ttm/ttm_pool.h>
+
+static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
+}
+
+static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
+}
+
+static inline unsigned int ttm_pool_beneficial_order(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & 0xff;
+}
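+
+/*
+ * Note, inferred from the 0xff mask above rather than stated elsewhere in
+ * this change: the low byte of @alloc_flags apparently encodes the highest
+ * allocation order the device still benefits from (0 meaning "no limit"),
+ * while the remaining bits carry the TTM_ALLOCATION_POOL_* flags.
+ */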
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..db854b581d83 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b8a826a24fb2..f5aa29dc6ec0 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,13 +22,99 @@
* Authors: Christian König
*/
-#include <linux/iosys-map.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
+#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
+#include <linux/cgroup_dmem.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
+ * @cursor: The cursor to initialize.
+ * @man: The resource manager.
+ *
+ * Initialize the cursor before using it for iteration.
+ */
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man)
+{
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ INIT_LIST_HEAD(&cursor->hitch.link);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can safely be called multiple times in a row.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
@@ -39,10 +125,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity-checks that the bulk move has no resources
+ * left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
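+/*
+ * Illustrative sketch, not part of this patch: a typical driver-side
+ * lifetime of a bulk move structure, where "vm" is a hypothetical per-VM
+ * object whose BOs share one reservation object. ttm_bo_set_bulk_move()
+ * is the existing helper used to attach a BO to the bulk move.
+ *
+ *   ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+ *
+ *   dma_resv_lock(bo->base.resv, NULL);
+ *   ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
+ *   dma_resv_unlock(bo->base.resv);
+ *
+ *   // after command submission, with the shared resv still locked
+ *   spin_lock(&bdev->lru_lock);
+ *   ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ *   spin_unlock(&bdev->lru_lock);
+ *
+ *   // at VM teardown, once all resources have been freed
+ *   ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
+ */
+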
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -54,6 +158,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -67,8 +172,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -81,12 +186,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
- list_move(&res->lru, &pos->last->lru);
+ if (pos->first == res)
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -101,6 +232,7 @@ static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
pos->first = res;
pos->last = res;
} else {
+ WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
ttm_lru_bulk_move_pos_tail(pos, res);
}
}
@@ -111,23 +243,44 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
- if (unlikely(pos->first == res && pos->last == res)) {
+ if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+ (pos->first == res && pos->last == res))) {
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
+static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ /*
+ * Take care when creating a new resource for a bo: it must not be considered
+ * swapped if it is not the bo's current resource, since only the current
+ * resource is logically associated with the ttm_tt. Think of a VRAM resource
+ * created to move a swapped-out bo to VRAM.
+ */
+ if (bo->resource != res || !bo->ttm)
+ return false;
+
+ dma_resv_assert_held(bo->base.resv);
+ return ttm_tt_is_swapped(bo->ttm);
+}
+
+static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ return bo->pin_count || ttm_resource_is_swapped(res, bo);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
@@ -135,7 +288,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
@@ -147,10 +300,10 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ if (ttm_resource_unevictable(res, bo)) {
+ list_move_tail(&res->lru.link, &bdev->unevictable);
- } else if (bo->bulk_move) {
+ } else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
ttm_lru_bulk_move_pos(bo->bulk_move, res);
@@ -159,7 +312,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -189,10 +342,10 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ if (ttm_resource_unevictable(res, bo))
+ list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -214,7 +367,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -222,25 +375,40 @@ EXPORT_SYMBOL(ttm_resource_fini);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res_ptr)
+ struct ttm_resource **res_ptr,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ struct dmem_cgroup_pool_state *pool = NULL;
int ret;
+ if (man->cg) {
+ ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
+ if (ret)
+ return ret;
+ }
+
ret = man->func->alloc(man, bo, place, res_ptr);
- if (ret)
+ if (ret) {
+ if (pool)
+ dmem_cgroup_uncharge(pool, bo->base.size);
return ret;
+ }
+
+ (*res_ptr)->css = pool;
spin_lock(&bo->bdev->lru_lock);
ttm_resource_add_bulk_move(*res_ptr, bo);
spin_unlock(&bo->bdev->lru_lock);
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
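+
+/*
+ * Note on the dmem cgroup charging above (driver-side setup is an assumption,
+ * not part of this patch): charging only happens for managers with a dmem
+ * region assigned, e.g. roughly
+ *
+ *   man->cg = dmem_cgroup_register_region(size, "drm/%s/vram0", dev_name);
+ *
+ * ttm_resource_alloc() then charges bo->base.size against that region and
+ * ttm_resource_free() uncharges it again; if the charge fails, the limiting
+ * pool state is returned through @ret_limit_pool so the caller can react.
+ */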
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
struct ttm_resource_manager *man;
+ struct dmem_cgroup_pool_state *pool;
if (!*res)
return;
@@ -248,9 +416,13 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
spin_lock(&bo->bdev->lru_lock);
ttm_resource_del_bulk_move(*res, bo);
spin_unlock(&bo->bdev->lru_lock);
+
+ pool = (*res)->css;
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
+ if (man->cg)
+ dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
@@ -285,37 +457,17 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
}
/**
- * ttm_resource_compatible - test for compatibility
+ * ttm_resource_compatible - check if resource is compatible with placement
*
- * @bdev: TTM device structure
- * @res: The resource to test
- * @place: The placement to test
- * @size: How many bytes the new allocation needs.
- *
- * Test if @res compatible with @place and @size.
+ * @res: the resource to check
+ * @placement: the placement to check against
+ * @evicting: true if the caller is doing evictions
*
- * Returns true if the res placement compatible with @place and @size.
+ * Returns true if the placement is compatible.
*/
-bool ttm_resource_compatible(struct ttm_device *bdev,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- struct ttm_resource_manager *man;
-
- if (!res || !place)
- return false;
-
- man = ttm_manager_type(bdev, res->mem_type);
- if (!man->func->compatible)
- return true;
-
- return man->func->compatible(man, res, place, size);
-}
-
-static bool ttm_resource_places_compat(struct ttm_resource *res,
- const struct ttm_place *places,
- unsigned num_placement)
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement,
+ bool evicting)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -324,44 +476,30 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
if (res->placement & TTM_PL_FLAG_TEMPORARY)
return false;
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
+ for (i = 0; i < placement->num_placement; i++) {
+ const struct ttm_place *place = &placement->placement[i];
+ struct ttm_resource_manager *man;
- if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
+ if (res->mem_type != place->mem_type)
continue;
- if ((res->mem_type == heap->mem_type) &&
- (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
- }
- return false;
-}
+ if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
-/**
- * ttm_resource_compat - check if resource is compatible with placement
- *
- * @res: the resource to check
- * @placement: the placement to check against
- *
- * Returns true if the placement is compatible.
- */
-bool ttm_resource_compat(struct ttm_resource *res,
- struct ttm_placement *placement)
-{
- if (ttm_resource_places_compat(res, placement->placement,
- placement->num_placement))
- return true;
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
+ !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
+ continue;
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_resource_places_compat(res, placement->busy_placement,
- placement->num_busy_placement))
- return true;
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (man->func->compatible &&
+ !man->func->compatible(man, res, place, bo->base.size))
+ continue;
+ return true;
+ }
return false;
}
-EXPORT_SYMBOL(ttm_resource_compat);
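+
+/*
+ * Worked example, illustrative only and not part of this patch: given a
+ * placement list such as
+ *
+ *   struct ttm_place places[] = {
+ *       { .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_DESIRED },
+ *       { .mem_type = TTM_PL_TT, .flags = TTM_PL_FLAG_FALLBACK },
+ *   };
+ *
+ * a resource currently in TTM_PL_TT is not compatible during normal
+ * validation (evicting == false skips FALLBACK entries), so validation will
+ * try to migrate the BO towards VRAM. During eviction (evicting == true) the
+ * DESIRED entry is skipped instead, the TT entry matches, and the resource
+ * is considered compatible, so the BO stays where it is.
+ */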
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)
@@ -386,14 +524,15 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
{
unsigned i;
- spin_lock_init(&man->move_lock);
man->bdev = bdev;
man->size = size;
man->usage = 0;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
- man->move = NULL;
+ spin_lock_init(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ man->eviction_fences[i] = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
@@ -412,41 +551,36 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = true
};
struct dma_fence *fence;
- int ret;
- unsigned i;
+ int ret, i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ ret = 0;
+
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (fence && !dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
+ spin_unlock(&man->eviction_lock);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret)
return ret;
- spin_lock(&bdev->lru_lock);
+ spin_lock(&man->eviction_lock);
}
}
- spin_unlock(&bdev->lru_lock);
-
- spin_lock(&man->move_lock);
- fence = dma_fence_get(man->move);
- spin_unlock(&man->move_lock);
-
- if (fence) {
- ret = dma_fence_wait(fence, false);
- dma_fence_put(fence);
- if (ret)
- return ret;
- }
+ spin_unlock(&man->eviction_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
@@ -461,6 +595,9 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
uint64_t usage;
+ if (WARN_ON_ONCE(!man->bdev))
+ return 0;
+
spin_lock(&man->bdev->lru_lock);
usage = man->usage;
spin_unlock(&man->bdev->lru_lock);
@@ -486,53 +623,102 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor)
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
+ struct ttm_resource_manager *man = cursor->man;
- lockdep_assert_held(&man->bdev->lru_lock);
+ if (WARN_ON_ONCE(!man))
+ return NULL;
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ lockdep_assert_held(&man->bdev->lru_lock);
- return NULL;
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
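+
+/*
+ * Illustrative sketch, not part of this patch: a minimal LRU walk with the
+ * cursor API. "bdev", "man" and want_to_evict() are placeholders for
+ * whatever the caller actually does; the hitch embedded in the cursor is
+ * what allows the LRU lock to be dropped and re-taken between iterations.
+ *
+ *   struct ttm_resource_cursor cursor;
+ *   struct ttm_resource *res;
+ *
+ *   ttm_resource_cursor_init(&cursor, man);
+ *   spin_lock(&bdev->lru_lock);
+ *   for (res = ttm_resource_manager_first(&cursor); res;
+ *        res = ttm_resource_manager_next(&cursor)) {
+ *       if (want_to_evict(res))
+ *           break;
+ *   }
+ *   ttm_resource_cursor_fini(&cursor);
+ *   spin_unlock(&bdev->lru_lock);
+ */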
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an LRU list
+ * @head: The list head of the LRU list.
+ *
+ * Return: Pointer to the first resource on the LRU list, or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index ab725d9d14a6..611d20ab966d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,15 +31,23 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <linux/sched.h>
-#include <linux/shmem_fs.h>
+#include <linux/cc_platform.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
+#include "ttm_pool_internal.h"
static unsigned long ttm_pages_limit;
@@ -60,6 +68,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -81,6 +90,16 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption, the
+ * mapped TT pages need to be decrypted; otherwise the drivers
+ * will end up sending encrypted memory to the GPU.
+ */
+ if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info_once(ddev, "TT memory decryption enabled.");
+ }
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
@@ -91,6 +110,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
/*
* Allocates storage for pointers to the pages that back the ttm.
@@ -129,6 +149,7 @@ void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
@@ -137,12 +158,13 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
unsigned long extra_pages)
{
ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
- ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
@@ -167,6 +189,13 @@ void ttm_tt_fini(struct ttm_tt *ttm)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
+ }
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -236,6 +265,50 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
+
+/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * error.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
/**
* ttm_tt_swapout - swap out tt object
@@ -293,6 +366,7 @@ out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -307,7 +381,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_add(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
@@ -331,6 +405,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
goto error;
ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
@@ -344,13 +419,16 @@ int ttm_tt_populate(struct ttm_device *bdev,
error:
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
+#endif
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
@@ -364,13 +442,14 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
#ifdef CONFIG_DEBUG_FS
@@ -450,3 +529,38 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct file *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
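+
+/*
+ * Illustrative lifecycle sketch, not part of this patch; the surrounding
+ * driver code and the exact shrinker hook are assumptions. A driver that
+ * wants its ttm_tts to be shrinkable typically wires the backup API up
+ * roughly as follows:
+ *
+ *   // at object creation, before the tt is first populated
+ *   ret = ttm_tt_setup_backup(tt);
+ *
+ *   // from the driver's shrinker, with the BO reserved and idle
+ *   struct ttm_backup_flags flags = { }; // fill in per the driver's policy
+ *   long shrunk = ttm_tt_backup(bdev, tt, flags);
+ *
+ *   // on the next populate/validate of the BO
+ *   ret = ttm_tt_restore(bdev, tt, &ctx);
+ *
+ * On success ttm_tt_backup() clears TTM_TT_FLAG_PRIV_POPULATED and sets
+ * TTM_TT_FLAG_BACKED_UP; ttm_tt_restore() clears TTM_TT_FLAG_BACKED_UP again.
+ */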