Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile                       |   15
-rw-r--r--  drivers/gpu/drm/ttm/tests/.kunitconfig             |    3
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile                 |   11
-rw-r--r--  drivers/gpu/drm/ttm/tests/TODO                     |   27
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c            |  637
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c   | 1176
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c        |  206
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c      |  304
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h      |   52
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.c       |  238
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.h       |   30
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c          |  437
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_resource_test.c      |  337
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_tt_test.c            |  402
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c              |   80
-rw-r--r--  drivers/gpu/drm/ttm/ttm_backup.c                   |  182
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                       | 2397
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_internal.h              |   60
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c               |  151
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c                  | 1424
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c                    |  704
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c                   |  322
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c             |  207
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c                     |  310
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c                   |  601
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c                   |  106
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.h                   |   43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c                   |  454
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c               |  919
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c           | 1131
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                     | 1377
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool_internal.h            |   25
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c            |  242
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c                 |  945
-rw-r--r--  drivers/gpu/drm/ttm/ttm_sys_manager.c              |   49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                       |  647
36 files changed, 9968 insertions, 6283 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dde2afb..40d07a35293a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -1,14 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
-ccflags-y := -Iinclude/drm
-ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
- ttm_bo_manager.o
-
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
+ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
+ ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
+ ttm_device.o ttm_sys_manager.o ttm_backup.o
+ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644
index 000000000000..1ae1ffabd51e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
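For reference (not part of the patch): a .kunitconfig like this is what the
KUnit wrapper consumes when running the suite under UML, typically via:

    ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests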
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644
index 000000000000..f3149de77541
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+ ttm_device_test.o \
+ ttm_pool_test.o \
+ ttm_resource_test.o \
+ ttm_tt_test.o \
+ ttm_bo_test.o \
+ ttm_bo_validate_test.o \
+ ttm_mock_manager.o \
+ ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/TODO b/drivers/gpu/drm/ttm/tests/TODO
new file mode 100644
index 000000000000..45b03d184ccf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/TODO
@@ -0,0 +1,27 @@
+TODO
+=====
+
+- Add a test case where the only evictable BO is busy
+- Update eviction tests so they use parametrized "from" memory type
+- Improve mock manager's implementation, e.g. allocate a block of
+ dummy memory that can be used when testing page mapping functions
+- Suggestion: Add test cases with external BOs
+- Suggestion: randomize the number and size of tested buffers in
+ ttm_bo_validate()
+- Agree on the naming convention
+- Rewrite the mock manager: drop use_tt and manage mock memory using
+ drm_mm manager
+
+Notes and gotchas
+=================
+
+- These tests are built and run with a UML kernel, because
+ 1) We are interested in hardware-independent testing
+ 2) We don't want to have actual DRM devices interacting with TTM
+ at the same time as the test one. Getting these to work in
+ parallel would require some time (...and that's a "todo" in itself!)
+- Triggering ttm_bo_vm_ops callbacks from KUnit (i.e. kernel) might be
+ a challenge, but is worth trying. Look at selftests like
+ i915/gem/selftests/i915_gem_mman.c for inspiration
+- The test suite runs on UML, where the ioremap() call returns NULL, meaning
+  that ttm_bo_ioremap() can't be tested unless we find a way to stub it
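Following up on the last gotcha, here is a minimal sketch (an editor's
illustration, not part of this patch) of how a future test could cope with
UML's NULL-returning ioremap() by skipping rather than failing; the test
name and the remapped address/size are hypothetical placeholders:

#include <kunit/test.h>
#include <linux/io.h>

static void ttm_bo_ioremap_or_skip(struct kunit *test)
{
	void __iomem *vaddr;

	/* Physical address and size are placeholders for illustration */
	vaddr = ioremap(0x1000, PAGE_SIZE);
	if (!vaddr)
		kunit_skip(test, "ioremap() returned NULL (expected on UML)");

	/* ...exercise ttm_bo_ioremap() here on real hardware... */
	iounmap(vaddr);
}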
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
new file mode 100644
index 000000000000..d468f8322072
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/dma-resv.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_8K
+
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
+#else
+#define ww_mutex_base_lock(b) mutex_lock(b)
+#endif
+
+struct ttm_bo_test_case {
+ const char *description;
+ bool interruptible;
+ bool no_wait;
+};
+
+static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
+ {
+ .description = "Cannot be interrupted and sleeps",
+ .interruptible = false,
+ .no_wait = false,
+ },
+ {
+ .description = "Cannot be interrupted, locks straight away",
+ .interruptible = false,
+ .no_wait = true,
+ },
+ {
+		.description = "Can be interrupted and sleeps",
+ .interruptible = true,
+ .no_wait = false,
+ },
+};
+
+static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);
+
+static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
+{
+ const struct ttm_bo_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ dma_resv_unlock(bo->base.resv);
+}
+
+static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ /* Let's lock it beforehand */
+ dma_resv_lock(bo->base.resv, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+}
+
+static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = true;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EBUSY);
+
+ ww_acquire_fini(&ctx);
+}
+
+static void ttm_bo_reserve_double_resv(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ dma_resv_unlock(bo->base.resv);
+ ww_acquire_fini(&ctx);
+
+ KUNIT_ASSERT_EQ(test, err, -EALREADY);
+}
+
+/*
+ * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
+ * a deadlock by manipulating the sequence number of the context that holds
+ * the dma_resv lock of bo2, so that the other, younger context loses the
+ * wait-die tie-break and has to back off (indicated by -EDEADLK). The
+ * subtest checks that ttm_bo_reserve() properly propagates that error.
+ */
+static void ttm_bo_reserve_deadlock(struct kunit *test)
+{
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ww_acquire_ctx ctx1, ctx2;
+ bool interruptible = false;
+ bool no_wait = false;
+ int err;
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ ww_acquire_init(&ctx1, &reservation_ww_class);
+ ww_mutex_base_lock(&bo2->base.resv->lock.base);
+
+ /* The deadlock will be caught by WW mutex, don't warn about it */
+ lock_release(&bo2->base.resv->lock.base.dep_map, 1);
+
+ bo2->base.resv->lock.ctx = &ctx2;
+ ctx2 = ctx1;
+	ctx2.stamp--; /* Make the context holding the lock older */
+
+ err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
+ KUNIT_ASSERT_EQ(test, err, -EDEADLK);
+
+ dma_resv_unlock(bo1->base.resv);
+ ww_acquire_fini(&ctx1);
+}
+
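For context (an editor's sketch, not part of this patch), this is roughly the
backoff that ttm_bo_reserve() callers are expected to perform when they see
-EDEADLK; the helper name and the two-BO setup are hypothetical:

static int reserve_bo_pair(struct ttm_buffer_object *bo1,
			   struct ttm_buffer_object *bo2,
			   struct ww_acquire_ctx *ctx)
{
	int err;

retry:
	err = ttm_bo_reserve(bo1, true, false, ctx);
	if (err)
		return err;

	err = ttm_bo_reserve(bo2, true, false, ctx);
	if (err == -EDEADLK) {
		/* We lost the wait-die tie-break: drop what we hold... */
		dma_resv_unlock(bo1->base.resv);
		/* ...sleep until the contended lock becomes free... */
		dma_resv_lock_slow(bo2->base.resv, ctx);
		dma_resv_unlock(bo2->base.resv);
		/*
		 * ...and retry with the same context; its stamp is kept, so
		 * this task eventually becomes the oldest and must succeed.
		 */
		goto retry;
	}
	if (err)
		dma_resv_unlock(bo1->base.resv);

	return err;
}

(A real driver would usually keep the lock that dma_resv_lock_slow() took and
reorder its acquisition; the sketch trades that optimization for brevity.)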
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+struct signal_timer {
+ struct timer_list timer;
+ struct ww_acquire_ctx *ctx;
+};
+
+static void signal_for_ttm_bo_reserve(struct timer_list *t)
+{
+ struct signal_timer *s_timer = timer_container_of(s_timer, t, timer);
+ struct task_struct *task = s_timer->ctx->task;
+
+ do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
+}
+
+static int threaded_ttm_bo_reserve(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct signal_timer s_timer;
+ struct ww_acquire_ctx ctx;
+ bool interruptible = true;
+ bool no_wait = false;
+ int err;
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ /* Prepare a signal that will interrupt the reservation attempt */
+ timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
+ s_timer.ctx = &ctx;
+
+	mod_timer(&s_timer.timer, jiffies + msecs_to_jiffies(100));
+
+ err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
+
+ timer_delete_sync(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
+
+ ww_acquire_fini(&ctx);
+
+ return err;
+}
+
+static void ttm_bo_reserve_interrupted(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
+
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");
+
+ /* Take a lock so the threaded reserve has to wait */
+ mutex_lock(&bo->base.resv->lock.base);
+
+ wake_up_process(task);
+ msleep(20);
+ err = kthread_stop(task);
+
+ mutex_unlock(&bo->base.resv->lock.base);
+
+ KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
+}
+#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */
+
+static void ttm_bo_unreserve_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->priority = bo_prio;
+
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->resource = res1;
+
+ /* Add a dummy resource to populate LRU */
+	err = ttm_resource_alloc(bo, place, &res2, NULL);
+	KUNIT_ASSERT_EQ(test, err, 0);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unreserve(bo);
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
+
+ ttm_resource_free(bo, &res2);
+ ttm_resource_free(bo, &res1);
+}
+
+static void ttm_bo_unreserve_pinned(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource *res1, *res2;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res1;
+
+ /* Add a dummy resource to the pinned list */
+ err = ttm_resource_alloc(bo, place, &res2, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
+
+ ttm_bo_unreserve(bo);
+ KUNIT_ASSERT_EQ(test,
+ list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1);
+
+ ttm_resource_free(bo, &res1);
+ ttm_resource_free(bo, &res2);
+}
+
+static void ttm_bo_unreserve_bulk(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo1, *bo2;
+ struct ttm_resource *res1, *res2;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ struct dma_resv *resv;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ dma_resv_init(resv);
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
+
+ dma_resv_lock(bo1->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
+ dma_resv_unlock(bo1->base.resv);
+
+ err = ttm_resource_alloc(bo1, place, &res1, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo1->resource = res1;
+
+ dma_resv_lock(bo2->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
+ dma_resv_unlock(bo2->base.resv);
+
+ err = ttm_resource_alloc(bo2, place, &res2, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo2->resource = res2;
+
+ ttm_bo_reserve(bo1, false, false, NULL);
+ ttm_bo_unreserve(bo1);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+ KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);
+
+ ttm_resource_free(bo1, &res1);
+ ttm_resource_free(bo2, &res2);
+
+ dma_resv_fini(resv);
+}
+
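For context (an editor's sketch, not part of this patch), the driver-side
pattern this bulk test mirrors: a VM collects its BOs in one
ttm_lru_bulk_move so their LRU positions can be bumped in a single bulk
operation. The my_vm type here is hypothetical:

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

struct my_vm {				/* hypothetical driver VM */
	struct ttm_device *bdev;
	struct ttm_lru_bulk_move lru_bulk_move;
};

static void my_vm_move_to_lru_tail(struct my_vm *vm)
{
	/* Bulk-move all of the VM's resources to the tail of their LRUs */
	spin_lock(&vm->bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&vm->bdev->lru_lock);
}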
+static void ttm_bo_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_fini(bo);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "kunit-ttm-bo-put";
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static void ttm_bo_fini_shared_resv(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct dma_resv *external_resv;
+ struct dma_fence *fence;
+ /* A dummy DMA fence lock */
+ spinlock_t fence_lock;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+	external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, external_resv);
+
+ dma_resv_init(external_resv);
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ spin_lock_init(&fence_lock);
+ dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);
+
+ dma_resv_lock(external_resv, NULL);
+ dma_resv_reserve_fences(external_resv, 1);
+ dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_unlock(external_resv);
+
+ dma_fence_signal(fence);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+ bo->base.resv = external_resv;
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_pin_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_device *ttm_dev;
+ unsigned int no_pins = 3;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ for (int i = 0; i < no_pins; i++) {
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
+}
+
+static void ttm_bo_pin_unpin_resource(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+ ttm_bo_pin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 0);
+
+ ttm_resource_free(bo, &res);
+}
+
+static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_lru_bulk_move lru_bulk_move;
+ struct ttm_lru_bulk_move_pos *pos;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ struct ttm_device *ttm_dev;
+ struct ttm_place *place;
+ u32 mem_type = TTM_PL_SYSTEM;
+ unsigned int bo_priority = 0;
+ int err;
+
+ ttm_lru_bulk_move_init(&lru_bulk_move);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ priv->ttm_dev = ttm_dev;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_resource_alloc(bo, place, &res, NULL);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->resource = res;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_set_bulk_move(bo, &lru_bulk_move);
+
+ /* Multiple pins */
+ ttm_bo_pin(bo);
+ ttm_bo_pin(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ pos = &lru_bulk_move.pos[mem_type][bo_priority];
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
+ KUNIT_ASSERT_NULL(test, pos->first);
+ KUNIT_ASSERT_NULL(test, pos->last);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_resource_free(bo, &res);
+}
+
+static struct kunit_case ttm_bo_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
+ ttm_bo_reserve_gen_params),
+ KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
+ KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
+ KUNIT_CASE(ttm_bo_reserve_double_resv),
+#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
+ KUNIT_CASE(ttm_bo_reserve_interrupted),
+#endif
+ KUNIT_CASE(ttm_bo_reserve_deadlock),
+ KUNIT_CASE(ttm_bo_unreserve_basic),
+ KUNIT_CASE(ttm_bo_unreserve_pinned),
+ KUNIT_CASE(ttm_bo_unreserve_bulk),
+ KUNIT_CASE(ttm_bo_fini_basic),
+ KUNIT_CASE(ttm_bo_fini_shared_resv),
+ KUNIT_CASE(ttm_bo_pin_basic),
+ KUNIT_CASE(ttm_bo_pin_unpin_resource),
+ KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+ {}
+};
+
+static struct kunit_suite ttm_bo_test_suite = {
+ .name = "ttm_bo",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
new file mode 100644
index 000000000000..2eda87882e65
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+#include "ttm_mock_manager.h"
+
+#define BO_SIZE SZ_4K
+#define MANAGER_SIZE SZ_1M
+
+static struct spinlock fence_lock;
+
+struct ttm_bo_validate_test_case {
+ const char *description;
+ enum ttm_bo_type bo_type;
+ u32 mem_type;
+ bool with_ttm;
+ bool no_gpu_wait;
+};
+
+static struct ttm_placement *ttm_placement_kunit_init(struct kunit *test,
+ struct ttm_place *places,
+ unsigned int num_places)
+{
+ struct ttm_placement *placement;
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ placement->num_placement = num_places;
+ placement->placement = places;
+
+ return placement;
+}
+
+static const char *fence_name(struct dma_fence *f)
+{
+ return "ttm-bo-validate-fence";
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = fence_name,
+ .get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_mock_fence(struct kunit *test)
+{
+ struct dma_fence *fence;
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ dma_fence_init(fence, &fence_ops, &fence_lock, 0, 0);
+
+ return fence;
+}
+
+static void dma_resv_kunit_active_fence_init(struct kunit *test,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ struct dma_fence *fence;
+
+ fence = alloc_mock_fence(test);
+ dma_fence_enable_sw_signaling(fence);
+
+ dma_resv_lock(resv, NULL);
+ dma_resv_reserve_fences(resv, 1);
+ dma_resv_add_fence(resv, fence, usage);
+ dma_resv_unlock(resv);
+}
+
+static void ttm_bo_validate_case_desc(const struct ttm_bo_validate_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_type_cases[] = {
+ {
+ .description = "Buffer object for userspace",
+ .bo_type = ttm_bo_type_device,
+ },
+ {
+ .description = "Kernel buffer object",
+ .bo_type = ttm_bo_type_kernel,
+ },
+ {
+ .description = "Shared buffer object",
+ .bo_type = ttm_bo_type_sg,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_types, ttm_bo_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_init_reserved_sys_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_test_devices *priv = test->priv;
+ enum ttm_bo_type bo_type = params->bo_type;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, bo->destroy, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, bo->pin_count, 0);
+ KUNIT_EXPECT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_FALSE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_init_reserved_mock_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = params->bo_type;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_init_reserved_resv(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct dma_resv resv;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+ dma_resv_init(&resv);
+ dma_resv_lock(&resv, NULL);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, &resv,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_basic(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ u32 fst_mem = TTM_PL_SYSTEM, snd_mem = TTM_PL_VRAM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *fst_placement, *snd_placement;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *snd_place;
+ u32 size = ALIGN(SZ_8K, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ fst_placement = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ fst_placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION);
+ snd_placement = ttm_placement_kunit_init(test, snd_place, 1);
+
+ err = ttm_bo_validate(bo, snd_placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_EXPECT_EQ(test, bo->resource->placement,
+ DRM_BUDDY_TOPDOWN_ALLOCATION);
+
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_invalid_placement(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 unknown_mem_type = TTM_PL_PRIV + 1;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, unknown_mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_failed_alloc(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bad_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_pinned(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_pin(bo);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_bo_fini(bo);
+}
+
+static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
+ {
+ .description = "System manager",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "VRAM manager",
+ .mem_type = TTM_PL_VRAM,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_mem, ttm_mem_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_same_placement(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, params->mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_init(priv->ttm_dev, params->mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
+
+ ttm_bo_fini(bo);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
+}
+
+static void ttm_bo_validate_busy_placement(struct kunit *test)
+{
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_bad_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ man = ttm_manager_type(priv->ttm_dev, snd_mem);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_bo_fini(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_multihop(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ u32 fst_mem = TTM_PL_VRAM, tmp_mem = TTM_PL_TT, final_mem = TTM_PL_SYSTEM;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *final_place;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, tmp_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ placement_init = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement_init, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ final_place = ttm_place_kunit_init(test, final_mem, 0);
+ placement_val = ttm_placement_kunit_init(test, final_place, 1);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
+
+ ttm_bo_fini(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_no_placement_cases[] = {
+ {
+ .description = "Buffer object in system domain, no page vector",
+ },
+ {
+ .description = "Buffer object in system domain with an existing page vector",
+ .with_ttm = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_no_placement, ttm_bo_no_placement_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_tt *old_tt;
+ u32 flags;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ if (params->with_ttm) {
+ old_tt = priv->ttm_dev->funcs->ttm_tt_create(bo, 0);
+ ttm_pool_alloc(&priv->ttm_dev->pool, old_tt, &ctx);
+ bo->ttm = old_tt;
+ }
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (params->with_ttm) {
+ flags = bo->ttm->page_flags;
+
+ KUNIT_ASSERT_PTR_EQ(test, bo->ttm, old_tt);
+ KUNIT_ASSERT_FALSE(test, flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
+ }
+
+ ttm_bo_fini(bo);
+}
+
+static int threaded_dma_resv_signal(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct dma_resv *resv = bo->base.resv;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_signal(fence);
+ }
+ dma_resv_iter_end(&cursor);
+
+ return 0;
+}
+
+static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = params->bo_type;
+
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ /* Create an active fence to simulate a non-idle resv object */
+ spin_lock_init(&fence_lock);
+ dma_resv_kunit_active_fence_init(test, bo->base.resv, usage);
+
+ task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create dma resv signal task\n");
+
+ wake_up_process(task);
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_ASSERT_NULL(test, bo->resource);
+ KUNIT_ASSERT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (bo->type != ttm_bo_type_sg)
+ KUNIT_ASSERT_PTR_EQ(test, bo->base.resv, &bo->base._resv);
+
+ /* Make sure we have an idle object at this point */
+ dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
+
+ ttm_bo_fini(bo);
+}
+
+static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ man->eviction_fences[0] = dma_fence_get_stub();
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ ttm_bo_fini(bo);
+ dma_fence_put(man->eviction_fences[0]);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
+ {
+ .description = "Waits for GPU",
+ .no_gpu_wait = false,
+ },
+ {
+ .description = "Tries to lock straight away",
+ .no_gpu_wait = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_wait, ttm_bo_validate_wait_cases,
+ ttm_bo_validate_case_desc);
+
+static int threaded_fence_signal(void *arg)
+{
+ struct dma_fence *fence = arg;
+
+ msleep(20);
+
+ return dma_fence_signal(fence);
+}
+
+static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { },
+ ctx_val = { .no_wait_gpu = params->no_gpu_wait };
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ spin_lock_init(&fence_lock);
+ man = ttm_manager_type(priv->ttm_dev, fst_mem);
+ man->eviction_fences[0] = alloc_mock_fence(test);
+
+ task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create move fence signal task\n");
+
+ wake_up_process(task);
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT);
+ man->eviction_fences[0] = NULL;
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);
+
+ if (params->no_gpu_wait)
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ else
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
+
+ ttm_bo_fini(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_happy_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 small = SZ_8K, medium = SZ_512K,
+ big = MANAGER_SIZE - (small + medium);
+ u32 bo_sizes[] = { small, medium, big };
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bos, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 bo_no = 3;
+ int i, err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bos = kunit_kmalloc_array(test, bo_no, sizeof(*bos), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bos);
+
+ memset(bos, 0, sizeof(*bos) * bo_no);
+ for (i = 0; i < bo_no; i++) {
+ drm_gem_private_object_init(priv->drm, &bos[i].base, bo_sizes[i]);
+ err = ttm_bo_init_reserved(priv->ttm_dev, &bos[i], bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bos[i].base.resv);
+ }
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bos[0].resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, small * 2 + BO_SIZE);
+ KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
+
+ for (i = 0; i < bo_no; i++)
+ ttm_bo_fini(&bos[i]);
+ ttm_bo_fini(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_pin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_fini(bo_small);
+
+ ttm_bo_reserve(bo_big, false, false, NULL);
+ ttm_bo_unpin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+ ttm_bo_fini(bo_big);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_buffer_object *bo, *bo_evictable, *bo_pinned;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 size = SZ_512K;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_pinned = kunit_kzalloc(test, sizeof(*bo_pinned), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_pinned);
+
+ drm_gem_private_object_init(priv->drm, &bo_pinned->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_pinned, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ ttm_bo_pin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+
+ bo_evictable = kunit_kzalloc(test, sizeof(*bo_evictable), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evictable);
+
+ drm_gem_private_object_init(priv->drm, &bo_evictable->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evictable, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evictable->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_pinned->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
+
+ ttm_bo_fini(bo);
+ ttm_bo_fini(bo_evictable);
+
+ ttm_bo_reserve(bo_pinned, false, false, NULL);
+ ttm_bo_unpin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+ ttm_bo_fini(bo_pinned);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_deleted_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ u32 small = SZ_8K, big = MANAGER_SIZE - BO_SIZE;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_resource_manager *man;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, big);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), big);
+
+ dma_resv_unlock(bo_big->base.resv);
+ bo_big->deleted = true;
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, small, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), small);
+ KUNIT_EXPECT_NULL(test, bo_big->ttm);
+ KUNIT_EXPECT_NULL(test, bo_big->resource);
+
+ ttm_bo_fini(bo_small);
+ ttm_bo_fini(bo_big);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_type_evict = TTM_PL_MOCK1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo_init, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ /*
+	 * Drop the default device and set up a new one that points to a busy,
+	 * and thus unsuitable, eviction domain.
+ */
+ ttm_device_fini(priv->ttm_dev);
+
+ err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_busy_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_init = kunit_kzalloc(test, sizeof(*bo_init), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_init);
+
+ drm_gem_private_object_init(priv->drm, &bo_init->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_init, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_init->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+ KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
+ KUNIT_EXPECT_NULL(test, bo_val->resource);
+
+ ttm_bo_fini(bo_init);
+ ttm_bo_fini(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
+}
+
+static void ttm_bo_validate_evict_gutting(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo, *bo_evict;
+ u32 mem_type = TTM_PL_MOCK1;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_evict = kunit_kzalloc(test, sizeof(*bo_evict), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evict);
+
+ drm_gem_private_object_init(priv->drm, &bo_evict->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evict, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evict->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_ASSERT_NULL(test, bo_evict->resource);
+ KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+
+ ttm_bo_fini(bo_evict);
+ ttm_bo_fini(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_recursive_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_TT, mem_type_evict = TTM_PL_MOCK2;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_tt, *placement_mock;
+ struct ttm_buffer_object *bo_tt, *bo_mock, *bo_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *place_tt, *place_mock;
+ int err;
+
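+	/*
+	 * Fill TTM_PL_TT with bo_tt and TTM_PL_MOCK2 with bo_mock. Validating
+	 * bo_val into TT then has to evict bo_tt, whose eviction placement is
+	 * MOCK2 (see mock_evict_flags()), which in turn evicts bo_mock: a
+	 * recursive eviction chain.
+	 */
+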
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place_tt = ttm_place_kunit_init(test, mem_type, 0);
+ place_mock = ttm_place_kunit_init(test, mem_type_evict, 0);
+
+ placement_tt = ttm_placement_kunit_init(test, place_tt, 1);
+ placement_mock = ttm_placement_kunit_init(test, place_mock, 1);
+
+ bo_tt = kunit_kzalloc(test, sizeof(*bo_tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_tt);
+
+ bo_mock = kunit_kzalloc(test, sizeof(*bo_mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_mock);
+
+ drm_gem_private_object_init(priv->drm, &bo_tt->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_tt, bo_type, placement_tt,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_tt->base.resv);
+
+ drm_gem_private_object_init(priv->drm, &bo_mock->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_mock, bo_type, placement_mock,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_mock->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement_tt, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
+
+ ttm_bo_fini(bo_val);
+ ttm_bo_fini(bo_tt);
+ ttm_bo_fini(bo_mock);
+}
+
+static struct kunit_case ttm_bo_validate_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_sys_man, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_mock_man, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_init_reserved_resv),
+ KUNIT_CASE_PARAM(ttm_bo_validate_basic, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_invalid_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_same_placement,
+ ttm_bo_validate_mem_gen_params),
+ KUNIT_CASE(ttm_bo_validate_failed_alloc),
+ KUNIT_CASE(ttm_bo_validate_pinned),
+ KUNIT_CASE(ttm_bo_validate_busy_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_multihop, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_signaled,
+ ttm_bo_no_placement_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_not_signaled,
+ ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
+ KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
+ ttm_bo_validate_wait_gen_params),
+ KUNIT_CASE(ttm_bo_validate_happy_evict),
+ KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
+ KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
+ KUNIT_CASE(ttm_bo_validate_deleted_evict),
+ KUNIT_CASE(ttm_bo_validate_busy_domain_evict),
+ KUNIT_CASE(ttm_bo_validate_evict_gutting),
+	KUNIT_CASE(ttm_bo_validate_recursive_evict),
+ {}
+};
+
+static struct kunit_suite ttm_bo_validate_test_suite = {
+ .name = "ttm_bo_validate",
+ .init = ttm_test_devices_all_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_validate_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_validate_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644
index 000000000000..2d55ad34fe48
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
+
+struct ttm_device_test_case {
+ const char *description;
+ unsigned int alloc_flags;
+ bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *ttm_sys_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+ ttm_sys_man = &ttm_dev->sysman;
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+
+ ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_devs;
+ unsigned int i, num_dev = 3;
+ int err;
+
+ ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+ for (i = 0; i < num_dev; i++) {
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+ }
+
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+ for (i = 0; i < num_dev; i++)
+ ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_device_fini(ttm_dev);
+
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+ KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct ttm_device *ttm_dev;
+ struct drm_vma_offset_manager *vma_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ /* Let's pretend there's no VMA manager allocated */
+ vma_man = drm->vma_offset_manager;
+ drm->vma_offset_manager = NULL;
+
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ /* Bring the manager back for a graceful cleanup */
+ drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+ {
+ .description = "No DMA allocations, no DMA32 required",
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
+ TTM_ALLOCATION_POOL_USE_DMA32,
+ .pools_init_expected = true,
+ },
+ {
+ .description = "No DMA allocations, DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, no DMA32 required",
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ .pools_init_expected = true,
+ },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
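+
+/*
+ * KUNIT_ARRAY_PARAM() above generates ttm_device_gen_params(), the
+ * iterator that KUNIT_CASE_PARAM() below uses to run
+ * ttm_device_init_pools() once per entry of ttm_device_cases[].
+ */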
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ const struct ttm_device_test_case *params = test->param_value;
+ struct ttm_device *ttm_dev;
+ struct ttm_pool *pool;
+	struct ttm_pool_type *pt;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, params->alloc_flags);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = &ttm_dev->pool;
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+ KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+ KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags);
+
+ if (params->pools_init_expected) {
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
+			pt = &pool->caching[i].orders[j];
+			KUNIT_EXPECT_PTR_EQ(test, pt->pool, pool);
+			KUNIT_EXPECT_EQ(test, pt->caching, i);
+			KUNIT_EXPECT_EQ(test, pt->order, j);
+
+			if (ttm_pool_uses_dma_alloc(pool))
+				KUNIT_ASSERT_FALSE(test,
+						   list_empty(&pt->pages));
+ }
+ }
+ }
+
+ ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+ KUNIT_CASE(ttm_device_init_basic),
+ KUNIT_CASE(ttm_device_init_multiple),
+ KUNIT_CASE(ttm_device_fini_basic),
+ KUNIT_CASE(ttm_device_init_no_vma_man),
+ KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+ {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+ .name = "ttm_device",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_device APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644
index 000000000000..7b533e4e1e04
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/export.h>
+
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+static const struct ttm_place sys_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock1_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK1,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock2_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK2,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_place,
+};
+
+static struct ttm_placement bad_placement = {
+ .num_placement = 1,
+ .placement = &mock1_place,
+};
+
+static struct ttm_placement mock_placement = {
+ .num_placement = 1,
+ .placement = &mock2_place,
+};
+
+static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+	if (!tt)
+		return NULL;
+
+	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
+		kfree(tt);
+		return NULL;
+	}
+
+	return tt;
+}
+
+static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+	ttm_tt_fini(ttm);
+	kfree(ttm);
+}
+
+static int mock_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
+{
+ struct ttm_resource *old_mem = bo->resource;
+
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ return -EMULTIHOP;
+ }
+
+ if ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) ||
+ (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
+}
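+
+/*
+ * A hedged sketch of the -EMULTIHOP contract mock_move() relies on: when a
+ * direct move is impossible (here VRAM -> SYSTEM), the driver fills in @hop
+ * and returns -EMULTIHOP, and the TTM core bounces the BO through that
+ * temporary placement before retrying, roughly:
+ *
+ *	ret = bdev->funcs->move(bo, evict, ctx, mem, &hop);
+ *	if (ret == -EMULTIHOP)
+ *		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ *
+ * ttm_bo_bounce_temp_buffer() is internal to ttm_bo.c; the exact retry loop
+ * may differ between kernel versions.
+ */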
+
+static void mock_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ switch (bo->resource->mem_type) {
+ case TTM_PL_VRAM:
+ case TTM_PL_SYSTEM:
+ *placement = sys_placement;
+ break;
+ case TTM_PL_TT:
+ *placement = mock_placement;
+ break;
+ case TTM_PL_MOCK1:
+ /* Purge objects coming from this domain */
+ break;
+ }
+}
+
+static void bad_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = bad_placement;
+}
+
+static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
+					    struct ttm_device *ttm,
+					    unsigned int alloc_flags,
+					    struct ttm_device_funcs *funcs)
+{
+	struct drm_device *drm = priv->drm;
+
+	return ttm_device_init(ttm, funcs, drm->dev,
+			       drm->anon_inode->i_mapping,
+			       drm->vma_offset_manager,
+			       alloc_flags);
+}
+
+struct ttm_device_funcs ttm_dev_funcs = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = mock_evict_flags,
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ unsigned int alloc_flags)
+{
+ return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
+ &ttm_dev_funcs);
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bad_evict_flags,
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
+
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm)
+{
+ return ttm_device_kunit_init_with_funcs(priv, ttm, 0,
+ &ttm_dev_funcs_bad_evict);
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size,
+ struct dma_resv *obj)
+{
+ struct drm_gem_object gem_obj = { };
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ bo->base = gem_obj;
+
+ if (obj)
+ bo->base.resv = obj;
+
+ err = drm_gem_object_init(devs->drm, &bo->base, size);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ bo->bdev = devs->ttm_dev;
+ bo->destroy = dummy_ttm_bo_destroy;
+
+ kref_init(&bo->kref);
+
+ return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
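+
+/*
+ * Illustrative use of the helper in a test body (devs, BO_SIZE, placement
+ * and ctx are the caller's); error handling elided:
+ *
+ *	bo = ttm_bo_kunit_init(test, devs, BO_SIZE, NULL);
+ *	ttm_bo_reserve(bo, false, false, NULL);
+ *	err = ttm_bo_validate(bo, placement, &ctx);
+ *	ttm_bo_unreserve(bo);
+ *	ttm_bo_fini(bo);
+ */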
+
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
+{
+ struct ttm_place *place;
+
+ place = kunit_kzalloc(test, sizeof(*place), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, place);
+
+ place->mem_type = mem_type;
+ place->flags = flags;
+
+ return place;
+}
+EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
+
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+ drm_gem_object_release(&bo->base);
+}
+EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+
+ devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, devs);
+
+ devs->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+ /* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
+ devs->dev->coherent_dma_mask = -1;
+
+ devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+ sizeof(*devs->drm), 0,
+ DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ devs = ttm_test_devices_basic(test);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(devs, ttm_dev, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ devs->ttm_dev = ttm_dev;
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+ if (devs->ttm_dev)
+ ttm_device_fini(devs->ttm_dev);
+
+ drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+	struct ttm_test_devices *priv;
+
+	priv = ttm_test_devices_basic(test);
+	test->priv = priv;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+int ttm_test_devices_all_init(struct kunit *test)
+{
+	struct ttm_test_devices *priv;
+
+	priv = ttm_test_devices_all(test);
+	test->priv = priv;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+ ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_DESCRIPTION("TTM KUnit test helper functions");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644
index 000000000000..f8402b979d05
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+#define TTM_PL_MOCK1 (TTM_PL_PRIV + 1)
+#define TTM_PL_MOCK2 (TTM_PL_PRIV + 2)
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+extern struct ttm_device_funcs ttm_dev_funcs_bad_evict;
+
+struct ttm_test_devices {
+ struct drm_device *drm;
+ struct device *dev;
+ struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ unsigned int alloc_flags);
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size,
+ struct dma_resv *obj);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type,
+ u32 flags);
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+int ttm_test_devices_all_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif // TTM_KUNIT_HELPERS_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
new file mode 100644
index 000000000000..dd395229e388
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_mock_manager.h"
+
+static inline struct ttm_mock_manager *
+to_mock_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct ttm_mock_manager, man);
+}
+
+static inline struct ttm_mock_resource *
+to_mock_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_mock_resource, base);
+}
+
+static int ttm_mock_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res;
+ struct drm_buddy *mm = &manager->mm;
+ u64 lpfn, fpfn, alloc_size;
+ int err;
+
+	mock_res = kzalloc(sizeof(*mock_res), GFP_KERNEL);
+	if (!mock_res)
+		return -ENOMEM;
+
+ fpfn = 0;
+ lpfn = man->size;
+
+ ttm_resource_init(bo, place, &mock_res->base);
+ INIT_LIST_HEAD(&mock_res->blocks);
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mock_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ mock_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
+	alloc_size = mock_res->base.size;
+ mutex_lock(&manager->lock);
+ err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size,
+ manager->default_page_size,
+ &mock_res->blocks,
+ mock_res->flags);
+	if (err)
+		goto error_free_blocks;
+	mutex_unlock(&manager->lock);
+
+ *res = &mock_res->base;
+ return 0;
+
+error_free_blocks:
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ ttm_resource_fini(man, &mock_res->base);
+ mutex_unlock(&manager->lock);
+
+ return err;
+}
+
+static void ttm_mock_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res = to_mock_mgr_resource(res);
+ struct drm_buddy *mm = &manager->mm;
+
+ mutex_lock(&manager->lock);
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ mutex_unlock(&manager->lock);
+
+ ttm_resource_fini(man, res);
+ kfree(mock_res);
+}
+
+static const struct ttm_resource_manager_func ttm_mock_manager_funcs = {
+ .alloc = ttm_mock_manager_alloc,
+ .free = ttm_mock_manager_free,
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_mock_manager *manager;
+ struct ttm_resource_manager *base;
+ int err;
+
+ manager = kzalloc(sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return -ENOMEM;
+
+ mutex_init(&manager->lock);
+
+	err = drm_buddy_init(&manager->mm, size, PAGE_SIZE);
+	if (err) {
+		kfree(manager);
+		return err;
+	}
+
+ manager->default_page_size = PAGE_SIZE;
+ base = &manager->man;
+ base->func = &ttm_mock_manager_funcs;
+ base->use_tt = true;
+
+ ttm_resource_manager_init(base, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, base);
+ ttm_resource_manager_set_used(base, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_init);
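+
+/*
+ * Illustrative wiring in a test, mirroring ttm_bo_validate_test.c: install
+ * the manager on a spare placement, exercise it, then tear it down:
+ *
+ *	ttm_mock_manager_init(priv->ttm_dev, TTM_PL_MOCK1, MANAGER_SIZE);
+ *	...
+ *	ttm_mock_manager_fini(priv->ttm_dev, TTM_PL_MOCK1);
+ */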
+
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_mock_manager *mock_man;
+ int err;
+
+ man = ttm_manager_type(bdev, mem_type);
+ mock_man = to_mock_mgr(man);
+
+ err = ttm_resource_manager_evict_all(bdev, man);
+ if (err)
+ return;
+
+ ttm_resource_manager_set_used(man, false);
+
+ mutex_lock(&mock_man->lock);
+ drm_buddy_fini(&mock_man->mm);
+ mutex_unlock(&mock_man->lock);
+
+	ttm_set_driver_manager(bdev, mem_type, NULL);
+
+	kfree(mock_man);
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_fini);
+
+static int ttm_bad_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -ENOSPC;
+}
+
+static int ttm_busy_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -EBUSY;
+}
+
+static void ttm_bad_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+}
+
+static bool ttm_bad_manager_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return true;
+}
+
+static const struct ttm_resource_manager_func ttm_bad_manager_funcs = {
+ .alloc = ttm_bad_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+static const struct ttm_resource_manager_func ttm_bad_busy_manager_funcs = {
+ .alloc = ttm_busy_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_resource_manager *man;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return -ENOMEM;
+
+ man->func = &ttm_bad_manager_funcs;
+
+ ttm_resource_manager_init(man, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_init);
+
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+	struct ttm_resource_manager *man;
+	int err;
+
+	err = ttm_bad_manager_init(bdev, mem_type, size);
+	if (err)
+		return err;
+
+	man = ttm_manager_type(bdev, mem_type);
+	man->func = &ttm_bad_busy_manager_funcs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_busy_manager_init);
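+
+/*
+ * The two failure-mode managers differ only in the error their .alloc
+ * returns: -ENOSPC ("bad") reads as a permanently full domain, while -EBUSY
+ * ("busy") reads as a contended one, letting tests drive the distinct
+ * fallback paths in ttm_bo_validate().
+ */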
+
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, mem_type);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, mem_type, NULL);
+
+ kfree(man);
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_fini);
+
+MODULE_DESCRIPTION("KUnit tests for ttm with mock resource managers");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
new file mode 100644
index 000000000000..e4c95f86a467
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_MOCK_MANAGER_H
+#define TTM_MOCK_MANAGER_H
+
+#include <drm/drm_buddy.h>
+
+struct ttm_mock_manager {
+ struct ttm_resource_manager man;
+ struct drm_buddy mm;
+ u64 default_page_size;
+ /* protects allocations of mock buffer objects */
+ struct mutex lock;
+};
+
+struct ttm_mock_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type);
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type);
+
+#endif // TTM_MOCK_MANAGER_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644
index 000000000000..11c92bd75779
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
+
+struct ttm_pool_test_case {
+ const char *description;
+ unsigned int order;
+ unsigned int alloc_flags;
+};
+
+struct ttm_pool_test_priv {
+ struct ttm_test_devices *devs;
+
+ /* Used to create mock ttm_tts */
+ struct ttm_buffer_object *mock_bo;
+};
+
+static struct ttm_operation_ctx simple_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+ u32 page_flags,
+ enum ttm_caching caching,
+ size_t size)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+ priv->mock_bo = bo;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ return tt;
+}
+
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+ size_t size,
+ enum ttm_caching caching)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_pool *pool;
+ struct ttm_tt *tt;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ return pool;
+}
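+
+/*
+ * ttm_pool_pre_populated() relies on the pool's recycling behaviour:
+ * allocating and immediately freeing a tt leaves the freed pages parked on
+ * the matching pool_type free list, so later tests can observe whether an
+ * allocation is served from (or a free returns pages to) that list.
+ */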
+
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+ {
+ .description = "One page",
+ .order = 0,
+ },
+ {
+ .description = "More than one page",
+ .order = 2,
+ },
+ {
+ .description = "Above the allocation limit",
+ .order = MAX_PAGE_ORDER + 1,
+ },
+ {
+ .description = "One page, with coherent DMA mappings enabled",
+ .order = 0,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ },
+ {
+ .description = "Above the allocation limit, with coherent DMA mappings enabled",
+ .order = MAX_PAGE_ORDER + 1,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
+ },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+ ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct page *fst_page, *last_page;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
+
+ KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+ KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+ KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ fst_page = tt->pages[0];
+ last_page = tt->pages[tt->num_pages - 1];
+
+ if (params->order <= MAX_PAGE_ORDER) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+ } else {
+ KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+ }
+ } else {
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+ } else {
+ /*
+			 * We expect to allocate one big block, followed by
+			 * order-0 blocks
+ */
+ KUNIT_ASSERT_EQ(test, fst_page->private,
+ min_t(unsigned int, MAX_PAGE_ORDER,
+ params->order));
+ KUNIT_ASSERT_EQ(test, last_page->private, 0);
+ }
+ }
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
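+
+/*
+ * The page->private checks above depend on how ttm_pool encodes ownership
+ * at the time of writing: without DMA allocations the pool stores the
+ * allocation order in page->private, while with
+ * TTM_ALLOCATION_POOL_USE_DMA_ALLOC it stores a pointer to its internal
+ * per-allocation bookkeeping, hence the NULL/non-NULL assertions.
+ */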
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_buffer_object *bo;
+ dma_addr_t dma1, dma2;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, devs, size, NULL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ dma1 = tt->dma_address[0];
+ dma2 = tt->dma_address[tt->num_pages - 1];
+
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching tt_caching = ttm_uncached;
+ enum ttm_caching pool_caching = ttm_cached;
+ size_t size = PAGE_SIZE;
+ unsigned int order = 0;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+ pt_pool = &pool->caching[pool_caching].orders[order];
+ pt_tt = &pool->caching[tt_caching].orders[order];
+
+ tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
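+	/*
+	 * Freed pages are recycled into the pool_type matching the tt's
+	 * caching, not the one the pool was pre-populated with, so both
+	 * lists end up non-empty.
+	 */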
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t fst_size = (1 << order) * PAGE_SIZE;
+ size_t snd_size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+ pt_pool = &pool->caching[caching].orders[order];
+ pt_tt = &pool->caching[caching].orders[0];
+
+ tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+	size_t size = (1 << order) * PAGE_SIZE;
+	int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
+	err = ttm_pool_alloc(pool, tt, &simple_ctx);
+	KUNIT_ASSERT_EQ(test, err, 0);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+	size_t size = (1 << order) * PAGE_SIZE;
+	int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
+	err = ttm_pool_alloc(pool, tt, &simple_ctx);
+	KUNIT_ASSERT_EQ(test, err, 0);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+ pt = &pool->caching[caching].orders[order];
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+ ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+ KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+ KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+ KUNIT_CASE(ttm_pool_free_dma_alloc),
+ KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+ KUNIT_CASE(ttm_pool_fini_basic),
+ {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+ .name = "ttm_pool",
+ .init = ttm_pool_test_init,
+ .exit = ttm_pool_test_fini,
+ .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_pool APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
new file mode 100644
index 000000000000..c0e4e35e0442
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define RES_SIZE SZ_4K
+#define TTM_PRIV_DUMMY_REG (TTM_NUM_MEM_TYPES - 1)
+
+struct ttm_resource_test_case {
+ const char *description;
+ u32 mem_type;
+ u32 flags;
+};
+
+struct ttm_resource_test_priv {
+ struct ttm_test_devices *devs;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+};
+
+static const struct ttm_resource_manager_func ttm_resource_manager_mock_funcs = { };
+
+static int ttm_resource_test_init(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_all(test);
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_resource_test_fini(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static void ttm_init_test_mocks(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ u32 mem_type, u32 flags)
+{
+ size_t size = RES_SIZE;
+
+ /* Make sure we have what we need for a good BO mock */
+ KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);
+
+ priv->bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
+ priv->place = ttm_place_kunit_init(test, mem_type, flags);
+}
+
+static void ttm_init_test_manager(struct kunit *test,
+ struct ttm_resource_test_priv *priv,
+ u32 mem_type)
+{
+ struct ttm_device *ttm_dev = priv->devs->ttm_dev;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ man->use_tt = false;
+ man->func = &ttm_resource_manager_mock_funcs;
+
+ ttm_resource_manager_init(man, ttm_dev, size);
+ ttm_set_driver_manager(ttm_dev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
+static const struct ttm_resource_test_case ttm_resource_cases[] = {
+ {
+ .description = "Init resource in TTM_PL_SYSTEM",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "Init resource in TTM_PL_VRAM",
+ .mem_type = TTM_PL_VRAM,
+ },
+ {
+ .description = "Init resource in a private placement",
+ .mem_type = TTM_PRIV_DUMMY_REG,
+ },
+ {
+ .description = "Init resource in TTM_PL_SYSTEM, set placement flags",
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_TOPDOWN,
+ },
+};
+
+static void ttm_resource_case_desc(const struct ttm_resource_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_resource, ttm_resource_cases, ttm_resource_case_desc);
+
+static void ttm_resource_init_basic(struct kunit *test)
+{
+ const struct ttm_resource_test_case *params = test->param_value;
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ u64 expected_usage;
+
+ ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
+ bo = priv->bo;
+ place = priv->place;
+
+ if (params->mem_type > TTM_PL_SYSTEM)
+ ttm_init_test_manager(test, priv, params->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+ expected_usage = man->usage + RES_SIZE;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+
+ ttm_resource_init(bo, place, res);
+
+ KUNIT_ASSERT_EQ(test, res->start, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, place->mem_type);
+ KUNIT_ASSERT_EQ(test, res->placement, place->flags);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ KUNIT_ASSERT_NULL(test, res->bus.addr);
+ KUNIT_ASSERT_EQ(test, res->bus.offset, 0);
+ KUNIT_ASSERT_FALSE(test, res->bus.is_iomem);
+ KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);
+ KUNIT_ASSERT_EQ(test, man->usage, expected_usage);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_init_pinned(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
+
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_pin(bo);
+ ttm_resource_init(bo, place, res);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable));
+
+ ttm_bo_unpin(bo);
+ ttm_resource_fini(man, res);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
+}
+
+static void ttm_resource_fini_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_init(bo, place, res);
+ ttm_resource_fini(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static void ttm_resource_manager_init_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ size_t size = SZ_16K;
+ int i;
+
+ man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_resource_manager_init(man, priv->devs->ttm_dev, size);
+
+ KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
+ KUNIT_ASSERT_EQ(test, man->size, size);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ KUNIT_ASSERT_NULL(test, man->eviction_fences[i]);
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
+}
+
+static void ttm_resource_manager_usage_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource *res;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource_manager *man;
+ u64 actual_usage;
+
+ ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, place->mem_type);
+
+ ttm_resource_init(bo, place, res);
+ actual_usage = ttm_resource_manager_usage(man);
+
+ KUNIT_ASSERT_EQ(test, actual_usage, RES_SIZE);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_resource_manager_set_used_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_TRUE(test, man->use_type);
+
+ ttm_resource_manager_set_used(man, false);
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+}
+
+static void ttm_sys_man_alloc_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ u32 mem_type = TTM_PL_SYSTEM;
+ int ret;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ ret = man->func->alloc(man, bo, place, &res);
+
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_EQ(test, res->size, RES_SIZE);
+ KUNIT_ASSERT_EQ(test, res->mem_type, mem_type);
+ KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
+
+ ttm_resource_fini(man, res);
+}
+
+static void ttm_sys_man_free_basic(struct kunit *test)
+{
+ struct ttm_resource_test_priv *priv = test->priv;
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_resource *res;
+ u32 mem_type = TTM_PL_SYSTEM;
+
+ ttm_init_test_mocks(test, priv, mem_type, 0);
+ bo = priv->bo;
+ place = priv->place;
+
+ res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ ttm_resource_alloc(bo, place, &res, NULL);
+
+ man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
+ man->func->free(man, res);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+}
+
+static struct kunit_case ttm_resource_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_resource_init_basic, ttm_resource_gen_params),
+ KUNIT_CASE(ttm_resource_init_pinned),
+ KUNIT_CASE(ttm_resource_fini_basic),
+ KUNIT_CASE(ttm_resource_manager_init_basic),
+ KUNIT_CASE(ttm_resource_manager_usage_basic),
+ KUNIT_CASE(ttm_resource_manager_set_used_basic),
+ KUNIT_CASE(ttm_sys_man_alloc_basic),
+ KUNIT_CASE(ttm_sys_man_free_basic),
+ {}
+};
+
+static struct kunit_suite ttm_resource_test_suite = {
+ .name = "ttm_resource",
+ .init = ttm_resource_test_init,
+ .exit = ttm_resource_test_fini,
+ .test_cases = ttm_resource_test_cases,
+};
+
+kunit_test_suites(&ttm_resource_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_resource and ttm_sys_man APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
new file mode 100644
index 000000000000..61ec6f580b62
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/shmem_fs.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+
+#define BO_SIZE SZ_4K
+
+struct ttm_tt_test_case {
+ const char *description;
+ u32 size;
+ u32 extra_pages_num;
+};
+
+static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
+ {
+ .description = "Page-aligned size",
+ .size = SZ_4K,
+ },
+ {
+ .description = "Extra pages requested",
+ .size = SZ_4K,
+ .extra_pages_num = 1,
+ },
+};
+
+static void ttm_tt_init_case_desc(const struct ttm_tt_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_cases,
+ ttm_tt_init_case_desc);
+
+static void ttm_tt_init_basic(struct kunit *test)
+{
+ const struct ttm_tt_test_case *params = test->param_value;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ u32 page_flags = TTM_TT_FLAG_ZERO_ALLOC;
+ enum ttm_caching caching = ttm_cached;
+ u32 extra_pages = params->extra_pages_num;
+ int num_pages = params->size >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, params->size, NULL);
+
+ err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages + extra_pages);
+
+ KUNIT_ASSERT_EQ(test, tt->page_flags, page_flags);
+ KUNIT_ASSERT_EQ(test, tt->caching, caching);
+
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_init_misaligned(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ u32 size = SZ_8K;
+ int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+
+ /* Make the object size misaligned */
+ bo->base.size += 1;
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_EQ(test, tt->num_pages, num_pages);
+}
+
+static void ttm_tt_fini_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->pages);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->pages);
+}
+
+static void ttm_tt_fini_sg(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tt->dma_address);
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->dma_address);
+}
+
+static void ttm_tt_fini_shmem(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct file *shmem;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+	shmem = shmem_file_setup("ttm swap", BO_SIZE, 0);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+	tt->swap_storage = shmem;
+
+ ttm_tt_fini(tt);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+}
+
+static void ttm_tt_create_basic(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_device;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+
+	/* Free manually, as it was allocated outside of KUnit */
+	ttm_tt_fini(bo->ttm);
+	kfree(bo->ttm);
+}
+
+static void ttm_tt_create_invalid_bo_type(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = ttm_bo_type_sg + 1;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+ KUNIT_EXPECT_NULL(test, bo->ttm);
+}
+
+static void ttm_tt_create_ttm_exists(struct kunit *test)
+{
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ enum ttm_caching caching = ttm_cached;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ err = ttm_tt_init(tt, bo, 0, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ bo->ttm = tt;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ /* Expect to keep the previous TTM */
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_PTR_EQ(test, tt, bo->ttm);
+}
+
+static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
+ u32 page_flags)
+{
+ return NULL;
+}
+
+static struct ttm_device_funcs ttm_dev_empty_funcs = {
+ .ttm_tt_create = ttm_tt_null_create,
+};
+
+static void ttm_tt_create_failed(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ /* Update ttm_device_funcs so we don't alloc ttm_tt */
+ devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, -ENOMEM);
+}
+
+static void ttm_tt_destroy_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ dma_resv_lock(bo->base.resv, NULL);
+ err = ttm_tt_create(bo, false);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+
+ ttm_tt_destroy(devs->ttm_dev, bo->ttm);
+}
+
+static void ttm_tt_populate_null_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ int err;
+
+ err = ttm_tt_populate(devs->ttm_dev, NULL, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EINVAL);
+}
+
+static void ttm_tt_populate_populated_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct page *populated_page;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ populated_page = *tt->pages;
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_PTR_EQ(test, populated_page, *tt->pages);
+}
+
+static void ttm_tt_unpopulate_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ KUNIT_ASSERT_FALSE(test, ttm_tt_is_populated(tt));
+}
+
+static void ttm_tt_unpopulate_empty_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ /* Expect graceful handling of unpopulated TTs */
+}
+
+static void ttm_tt_swapin_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ int expected_num_pages = BO_SIZE >> PAGE_SHIFT;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err, num_pages;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);
+ KUNIT_ASSERT_NOT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_TRUE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+
+	/* Swapout depopulates the TT; allocate pages and then swap them back in */
+ err = ttm_pool_alloc(&devs->ttm_dev->pool, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_swapin(tt);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_FALSE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+}
+
+static struct kunit_case ttm_tt_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
+ KUNIT_CASE(ttm_tt_init_misaligned),
+ KUNIT_CASE(ttm_tt_fini_basic),
+ KUNIT_CASE(ttm_tt_fini_sg),
+ KUNIT_CASE(ttm_tt_fini_shmem),
+ KUNIT_CASE(ttm_tt_create_basic),
+ KUNIT_CASE(ttm_tt_create_invalid_bo_type),
+ KUNIT_CASE(ttm_tt_create_ttm_exists),
+ KUNIT_CASE(ttm_tt_create_failed),
+ KUNIT_CASE(ttm_tt_destroy_basic),
+ KUNIT_CASE(ttm_tt_populate_null_ttm),
+ KUNIT_CASE(ttm_tt_populate_populated_ttm),
+ KUNIT_CASE(ttm_tt_unpopulate_basic),
+ KUNIT_CASE(ttm_tt_unpopulate_empty_ttm),
+ KUNIT_CASE(ttm_tt_swapin_basic),
+ {}
+};
+
+static struct kunit_suite ttm_tt_test_suite = {
+ .name = "ttm_tt",
+ .init = ttm_test_devices_all_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_tt_test_cases,
+};
+
+kunit_test_suites(&ttm_tt_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_tt APIs");
+MODULE_LICENSE("GPL and additional rights");
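/*
 * Editor's note, not part of this patch: with the .kunitconfig added above,
 * the TTM suites can be run via the in-tree KUnit wrapper, for example:
 *
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests
 *
 * The exact flags depend on the kunit.py version in the tree.
 */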
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -31,12 +32,11 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
-#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -48,14 +48,17 @@ struct ttm_agp_backend {
struct agp_bridge_data *bridge;
};
-static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct drm_mm_node *node = bo_mem->mm_node;
+ struct page *dummy_read_page = ttm_glob.dummy_read_page;
struct agp_memory *mem;
- int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret, cached = ttm->caching == ttm_cached;
unsigned i;
+ if (agp_be->mem)
+ return 0;
+
mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
@@ -65,7 +68,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
struct page *page = ttm->pages[i];
if (!page)
- page = ttm->dummy_read_page;
+ page = dummy_read_page;
mem->pages[mem->page_count++] = page;
}
@@ -74,27 +77,41 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
- ret = agp_bind_memory(mem, node->start);
+ ret = agp_bind_memory(mem, bo_mem->start);
if (ret)
pr_err("AGP Bind memory failed\n");
return ret;
}
+EXPORT_SYMBOL(ttm_agp_bind);
-static int ttm_agp_unbind(struct ttm_tt *ttm)
+void ttm_agp_unbind(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem) {
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
+ if (agp_be->mem->is_bound) {
+ agp_unbind_memory(agp_be->mem);
+ return;
+ }
agp_free_memory(agp_be->mem);
agp_be->mem = NULL;
}
- return 0;
}
+EXPORT_SYMBOL(ttm_agp_unbind);
+
+bool ttm_agp_is_bound(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (!ttm)
+ return false;
+
+ return (agp_be->mem != NULL);
+}
+EXPORT_SYMBOL(ttm_agp_is_bound);
-static void ttm_agp_destroy(struct ttm_tt *ttm)
+void ttm_agp_destroy(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
@@ -103,17 +120,11 @@ static void ttm_agp_destroy(struct ttm_tt *ttm)
ttm_tt_fini(ttm);
kfree(agp_be);
}
+EXPORT_SYMBOL(ttm_agp_destroy);
-static struct ttm_backend_func ttm_agp_func = {
- .bind = ttm_agp_bind,
- .unbind = ttm_agp_unbind,
- .destroy = ttm_agp_destroy,
-};
-
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+ uint32_t page_flags)
{
struct ttm_agp_backend *agp_be;
@@ -123,29 +134,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->ttm.func = &ttm_agp_func;
- if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined, 0)) {
+ kfree(agp_be);
return NULL;
}
return &agp_be->ttm;
}
EXPORT_SYMBOL(ttm_agp_tt_create);
-
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
-{
- if (ttm->state != tt_unpopulated)
- return 0;
-
- return ttm_pool_populate(ttm);
-}
-EXPORT_SYMBOL(ttm_agp_tt_populate);
-
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
-
-#endif
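/*
 * Illustrative sketch, not part of this patch: with the new signatures above,
 * a driver wires the AGP backend into its ttm_device_funcs hooks. "struct
 * my_device", my_device() and its agp_bridge member are hypothetical
 * stand-ins for driver-private state.
 */
static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct my_device *mdev = my_device(bo->bdev);

	/* ttm_agp_tt_create() fixes the caching mode to write-combined. */
	return ttm_agp_tt_create(bo, mdev->agp_bridge, page_flags);
}

static void my_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (ttm_agp_is_bound(ttm))
		ttm_agp_unbind(ttm);
	ttm_agp_destroy(ttm);
}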
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
new file mode 100644
index 000000000000..32530c75f038
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
+#include <linux/page-flags.h>
+#include <linux/swap.h>
+
+/*
+ * Need to map shmem indices to handles since a handle value
+ * of 0 means error, following the swp_entry_t convention.
+ */
+static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
+{
+ return (unsigned long)idx + 1;
+}
+
+static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
+{
+ return handle - 1;
+}
+
+/**
+ * ttm_backup_drop() - release memory associated with a handle
+ * @backup: The struct backup pointer used to obtain the handle
+ * @handle: The handle obtained from the @backup_page function.
+ */
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
+{
+ loff_t start = ttm_backup_handle_to_shmem_idx(handle);
+
+ start <<= PAGE_SHIFT;
+ shmem_truncate_range(file_inode(backup), start,
+ start + PAGE_SIZE - 1);
+}
+
+/**
+ * ttm_backup_copy_page() - Copy the contents of a previously backed
+ * up page
+ * @backup: The struct backup pointer used to back up the page.
+ * @dst: The struct page to copy into.
+ * @handle: The handle returned when the page was backed up.
+ * @intr: Try to perform waits interruptibly, or at least killable.
+ *
+ * Return: 0 on success, negative error code on failure, notably
+ * -EINTR if @intr was set to true and a signal is pending.
+ */
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+ pgoff_t handle, bool intr)
+{
+ struct address_space *mapping = backup->f_mapping;
+ struct folio *from_folio;
+ pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
+
+ from_folio = shmem_read_folio(mapping, idx);
+ if (IS_ERR(from_folio))
+ return PTR_ERR(from_folio);
+
+ copy_highpage(dst, folio_file_page(from_folio, idx));
+ folio_put(from_folio);
+
+ return 0;
+}
+
+/**
+ * ttm_backup_backup_page() - Backup a page
+ * @backup: The struct backup pointer to use.
+ * @page: The page to back up.
+ * @writeback: Whether to perform immediate writeback of the page.
+ * This may have performance implications.
+ * @idx: A unique integer for each page and each struct backup.
+ * This allows the backup implementation to avoid managing
+ * its address space separately.
+ * @page_gfp: The gfp value used when the page was allocated.
+ * This is used for accounting purposes.
+ * @alloc_gfp: The gfp to be used when allocating memory.
+ *
+ * Context: If called from reclaim context, the caller needs to
+ * assert that the shrinker gfp has __GFP_FS set, to avoid
+ * deadlocking on lock_page(). If @writeback is set to true and
+ * called from reclaim context, the caller also needs to assert
+ * that the shrinker gfp has __GFP_IO set, since without it,
+ * we're not allowed to start backup IO.
+ *
+ * Return: A handle on success. Negative error code on failure.
+ *
+ * Note: This function could be extended to back up a folio and
+ * implementations would then split the folio internally if needed.
+ * The drawback is that the caller would then have to keep track of
+ * the folio size and usage.
+ */
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp)
+{
+ struct address_space *mapping = backup->f_mapping;
+ unsigned long handle = 0;
+ struct folio *to_folio;
+ int ret;
+
+ to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
+ if (IS_ERR(to_folio))
+ return PTR_ERR(to_folio);
+
+ folio_mark_accessed(to_folio);
+ folio_lock(to_folio);
+ folio_mark_dirty(to_folio);
+ copy_highpage(folio_file_page(to_folio, idx), page);
+ handle = ttm_backup_shmem_idx_to_handle(idx);
+
+ if (writeback && !folio_mapped(to_folio) &&
+ folio_clear_dirty_for_io(to_folio)) {
+ folio_set_reclaim(to_folio);
+ ret = shmem_writeout(to_folio, NULL, NULL);
+ if (!folio_test_writeback(to_folio))
+ folio_clear_reclaim(to_folio);
+ /*
+		 * If writeout succeeds, it unlocks the folio. Errors
+ * are otherwise dropped, since writeout is only best
+ * effort here.
+ */
+ if (ret)
+ folio_unlock(to_folio);
+ } else {
+ folio_unlock(to_folio);
+ }
+
+ folio_put(to_folio);
+
+ return handle;
+}
+
+/**
+ * ttm_backup_fini() - Free the struct backup resources after last use.
+ * @backup: Pointer to the struct backup whose resources to free.
+ *
+ * After a call to this function, it's illegal to use the @backup pointer.
+ */
+void ttm_backup_fini(struct file *backup)
+{
+ fput(backup);
+}
+
+/**
+ * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
+ * left for backup.
+ *
+ * This function is also intended for driver use, to indicate whether a
+ * backup attempt is meaningful.
+ *
+ * Return: An approximate size of backup space available.
+ */
+u64 ttm_backup_bytes_avail(void)
+{
+ /*
+ * The idea behind backing up to shmem is that shmem objects may
+	 * eventually be swapped out. So there is no point in backing up if
+	 * little or no swap space is available. But the accuracy of this
+ * number also depends on shmem actually swapping out backed-up
+ * shmem objects without too much buffering.
+ */
+ return (u64)get_nr_swap_pages() << PAGE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
+
+/**
+ * ttm_backup_shmem_create() - Create a shmem-based struct backup.
+ * @size: The maximum size (in bytes) to back up.
+ *
+ * Create a backup utilizing shmem objects.
+ *
+ * Return: A pointer to a struct file on success,
+ * an error pointer on error.
+ */
+struct file *ttm_backup_shmem_create(loff_t size)
+{
+ return shmem_file_setup("ttm shmem backup", size, 0);
+}
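/*
 * Illustrative sketch, not part of this patch: a full round trip through the
 * backup API above. Error handling is trimmed and @page is assumed to be a
 * caller-owned, already-allocated struct page.
 */
static int my_backup_roundtrip(struct page *page)
{
	struct file *backup;
	s64 handle;
	int err;

	/* No swap space means backing up is pointless. */
	if (ttm_backup_bytes_avail() < PAGE_SIZE)
		return -ENOSPC;

	backup = ttm_backup_shmem_create(PAGE_SIZE);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	/* Slot 0 for this single page; the returned handle encodes idx + 1. */
	handle = ttm_backup_backup_page(backup, page, false, 0, GFP_KERNEL,
					GFP_KERNEL);
	if (handle < 0) {
		err = handle;
		goto out;
	}

	/* Later: restore the contents, then release the backing store. */
	err = ttm_backup_copy_page(backup, page, handle, true);
	ttm_backup_drop(backup, handle);
out:
	ttm_backup_fini(backup);
	return err;
}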
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670c..bd27607f8076 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -30,9 +31,13 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_allocation.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -40,345 +45,133 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/cgroup_dmem.h>
+#include <linux/dma-resv.h>
-#define TTM_ASSERT_LOCKED(param)
-#define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
-static void ttm_bo_global_kobj_release(struct kobject *kobj);
-
-static struct attribute ttm_bo_count = {
- .name = "bo_count",
- .mode = S_IRUGO
-};
-
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
-{
- int i;
-
- for (i = 0; i <= TTM_PL_PRIV5; i++)
- if (flags & (1 << i)) {
- *mem_type = i;
- return 0;
- }
- return -EINVAL;
-}
-
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-
- pr_err(" has_type: %d\n", man->has_type);
- pr_err(" use_type: %d\n", man->use_type);
- pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
- pr_err(" size: %llu\n", man->size);
- pr_err(" available_caching: 0x%08X\n", man->available_caching);
- pr_err(" default_caching: 0x%08X\n", man->default_caching);
- if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, TTM_PFX);
-}
+#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- int i, ret, mem_type;
+ struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
+ struct ttm_resource_manager *man;
+ int i, mem_type;
- pr_err("No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
- &mem_type);
- if (ret)
- return;
- pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i], mem_type);
- ttm_mem_type_debug(bo->bdev, mem_type);
- }
-}
-
-static ssize_t ttm_bo_global_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_bo_global *glob =
- container_of(kobj, struct ttm_bo_global, kobj);
-
- return snprintf(buffer, PAGE_SIZE, "%lu\n",
- (unsigned long) atomic_read(&glob->bo_count));
-}
-
-static struct attribute *ttm_bo_global_attrs[] = {
- &ttm_bo_count,
- NULL
-};
-
-static const struct sysfs_ops ttm_bo_global_ops = {
- .show = &ttm_bo_global_show
-};
-
-static struct kobj_type ttm_bo_glob_kobj_type = {
- .release = &ttm_bo_global_kobj_release,
- .sysfs_ops = &ttm_bo_global_ops,
- .default_attrs = ttm_bo_global_attrs
-};
-
-
-static inline uint32_t ttm_bo_type_flags(unsigned type)
-{
- return 1 << (type);
-}
-
-static void ttm_bo_release_list(struct kref *list_kref)
-{
- struct ttm_buffer_object *bo =
- container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
- size_t acc_size = bo->acc_size;
-
- BUG_ON(atomic_read(&bo->list_kref.refcount));
- BUG_ON(atomic_read(&bo->kref.refcount));
- BUG_ON(atomic_read(&bo->cpu_writers));
- BUG_ON(bo->sync_obj != NULL);
- BUG_ON(bo->mem.mm_node != NULL);
- BUG_ON(!list_empty(&bo->lru));
- BUG_ON(!list_empty(&bo->ddestroy));
-
- if (bo->ttm)
- ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->glob->bo_count);
- if (bo->resv == &bo->ttm_resv)
- reservation_object_fini(&bo->ttm_resv);
-
- if (bo->destroy)
- bo->destroy(bo);
- else {
- kfree(bo);
- }
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
-}
-
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
-
- lockdep_assert_held(&bo->resv->lock.base);
-
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-
- BUG_ON(!list_empty(&bo->lru));
-
- man = &bdev->man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru);
- kref_get(&bo->list_kref);
-
- if (bo->ttm != NULL) {
- list_add_tail(&bo->swap, &bo->glob->swap_lru);
- kref_get(&bo->list_kref);
- }
- }
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
-int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
-{
- int put_count = 0;
-
- if (!list_empty(&bo->swap)) {
- list_del_init(&bo->swap);
- ++put_count;
+ mem_type = placement->placement[i].mem_type;
+ drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i].flags, mem_type);
+ man = ttm_manager_type(bo->bdev, mem_type);
+ ttm_resource_manager_debug(man, &p);
}
- if (!list_empty(&bo->lru)) {
- list_del_init(&bo->lru);
- ++put_count;
- }
-
- /*
- * TODO: Add a driver hook to delete from
- * driver-specific LRU's here.
- */
-
- return put_count;
-}
-
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
-void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
- bool never_free)
-{
- kref_sub(&bo->list_kref, count,
- (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
+/**
+ * ttm_bo_move_to_lru_tail
+ *
+ * @bo: The buffer object.
+ *
+ * Move this BO to the tail of all LRU lists used to look up and reserve an
+ * object. This function must be called with struct ttm_global::lru_lock
+ * held, and is used to make a BO less likely to be considered for eviction.
+ */
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- int put_count;
+ dma_resv_assert_held(bo->base.resv);
- spin_lock(&bo->glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
+ if (bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
+EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
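/*
 * Illustrative usage, not part of this patch: per the kernel-doc above, the
 * caller already holds the BO's reservation and takes the device LRU lock
 * (bo->bdev->lru_lock, as used elsewhere in this file) around the call:
 *
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 */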
-/*
- * Call bo->mutex locked.
+/**
+ * ttm_bo_set_bulk_move - update BOs bulk move object
+ *
+ * @bo: The buffer object.
+ * @bulk: bulk move structure
+ *
+ * Update the BO's bulk move object, making sure that resources are added and
+ * removed as well. A bulk move allows moving many resources on the LRU at
+ * once, resulting in much less overhead for maintaining the LRU.
+ * The only requirement is that the resources stay together on the LRU and are
+ * never separated. This is enforced by setting the bulk_move structure on a BO.
+ * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
+ * their LRU list.
*/
-static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
- int ret = 0;
- uint32_t page_flags = 0;
+ dma_resv_assert_held(bo->base.resv);
- TTM_ASSERT_LOCKED(&bo->mutex);
- bo->ttm = NULL;
-
- if (bdev->need_dma32)
- page_flags |= TTM_PAGE_FLAG_DMA32;
-
- switch (bo->type) {
- case ttm_bo_type_device:
- if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
- case ttm_bo_type_kernel:
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
- ret = -ENOMEM;
- break;
- case ttm_bo_type_sg:
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_SG,
- glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL)) {
- ret = -ENOMEM;
- break;
- }
- bo->ttm->sg = bo->sg;
- break;
- default:
- pr_err("Illegal buffer object type\n");
- ret = -EINVAL;
- break;
- }
+ if (bo->bulk_move == bulk)
+ return;
- return ret;
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ bo->bulk_move = bulk;
+ if (bo->resource)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
}
+EXPORT_SYMBOL(ttm_bo_set_bulk_move);
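/*
 * Illustrative sketch, not part of this patch: a driver keeps one bulk move
 * per VM so the resources of its BOs stay together on the LRU, then moves the
 * whole group to the tail in one go. Assumes the bulk move was set up with
 * ttm_lru_bulk_move_init() and each BO had ttm_bo_set_bulk_move() called
 * under its reservation lock.
 */
static void my_vm_bump_lru(struct ttm_device *bdev,
			   struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(bulk);
	spin_unlock(&bdev->lru_lock);
}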
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem,
- bool evict, bool interruptible,
- bool no_wait_gpu)
+ struct ttm_resource *mem, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_place *hop)
{
- struct ttm_bo_device *bdev = bo->bdev;
- bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
- bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
- struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
- struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
- int ret = 0;
+ struct ttm_device *bdev = bo->bdev;
+ bool old_use_tt, new_use_tt;
+ int ret;
- if (old_is_pci || new_is_pci ||
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
- ret = ttm_mem_io_lock(old_man, true);
- if (unlikely(ret != 0))
- goto out_err;
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(old_man);
- }
+ old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+ new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
+
+ ttm_bo_unmap_virtual(bo);
/*
* Create and bind a ttm if required.
*/
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
- if (bo->ttm == NULL) {
- bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
- ret = ttm_bo_add_ttm(bo, zero);
- if (ret)
- goto out_err;
- }
-
- ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (new_use_tt) {
+ /* Zero init the new TTM structure if the old location should
+ * have used one as well.
+ */
+ ret = ttm_tt_create(bo, old_use_tt);
if (ret)
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(bo->ttm, mem);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
goto out_err;
}
-
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
- bo->mem = *mem;
- mem->mm_node = NULL;
- goto moved;
- }
}
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
-
- if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
- else if (bdev->driver->move)
- ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_gpu, mem);
- else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret)
+ goto out_err;
+ ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
if (ret) {
- if (bdev->driver->move_notify) {
- struct ttm_mem_reg tmp_mem = *mem;
- *mem = bo->mem;
- bo->mem = tmp_mem;
- bdev->driver->move_notify(bo, mem);
- bo->mem = *mem;
- *mem = tmp_mem;
- }
-
+ if (ret == -EMULTIHOP)
+ return ret;
goto out_err;
}
-moved:
- if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
- bo->evicted = false;
- }
-
- if (bo->mem.mm_node) {
- bo->offset = (bo->mem.start << PAGE_SHIFT) +
- bdev->man[bo->mem.mem_type].gpu_offset;
- bo->cur_placement = bo->mem.placement;
- } else
- bo->offset = 0;
-
+ ctx->bytes_moved += bo->base.size;
return 0;
out_err:
- new_man = &bdev->man[bo->mem.mem_type];
- if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
+ if (!old_use_tt)
+ ttm_bo_tt_destroy(bo);
return ret;
}
-/**
+/*
* Call bo::reserved.
* Will release GPU memory type usage on destruction.
* This is the place to put in driver specific hooks to release
@@ -388,303 +181,210 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- if (bo->bdev->driver->move_notify)
- bo->bdev->driver->move_notify(bo, NULL);
-
- if (bo->ttm) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- ttm_bo_mem_put(bo, &bo->mem);
+ if (bo->bdev->funcs->delete_mem_notify)
+ bo->bdev->funcs->delete_mem_notify(bo);
- ww_mutex_unlock (&bo->resv->lock);
+ ttm_bo_tt_destroy(bo);
+ ttm_resource_free(bo, &bo->resource);
}
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver = bdev->driver;
- void *sync_obj = NULL;
- int put_count;
- int ret;
-
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ int r;
- spin_lock(&bdev->fence_lock);
- (void) ttm_bo_wait(bo, false, false, true);
- if (!ret && !bo->sync_obj) {
- spin_unlock(&bdev->fence_lock);
- put_count = ttm_bo_del_from_lru(bo);
+ if (bo->base.resv == &bo->base._resv)
+ return 0;
- spin_unlock(&glob->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
+ BUG_ON(!dma_resv_trylock(&bo->base._resv));
- ttm_bo_list_ref_sub(bo, put_count, true);
+ r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ dma_resv_unlock(&bo->base._resv);
+ if (r)
+ return r;
- return;
+ if (bo->type != ttm_bo_type_sg) {
+		/* This works because the BO is about to be destroyed and nobody
+		 * references it anymore. The only tricky case is the trylock on
+ * the resv object while holding the lru_lock.
+ */
+ spin_lock(&bo->bdev->lru_lock);
+ bo->base.resv = &bo->base._resv;
+ spin_unlock(&bo->bdev->lru_lock);
}
- if (bo->sync_obj)
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
-
- if (!ret)
- ww_mutex_unlock(&bo->resv->lock);
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ return r;
+}
- if (sync_obj) {
- driver->sync_obj_flush(sync_obj);
- driver->sync_obj_unref(&sync_obj);
+static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
+{
+ struct dma_resv *resv = &bo->base._resv;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (!fence->ops->signaled)
+ dma_fence_enable_sw_signaling(fence);
}
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ dma_resv_iter_end(&cursor);
}
-/**
- * function ttm_bo_cleanup_refs_and_unlock
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop both before returning.
- *
- * @interruptible Any sleeps should occur interruptibly.
- * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+/*
+ * Block for the dma_resv object to become idle, lock the buffer and clean up
+ * the resource and tt object.
*/
-
-static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_gpu)
+static void ttm_bo_delayed_delete(struct work_struct *work)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
- struct ttm_bo_global *glob = bo->glob;
- int put_count;
- int ret;
-
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, true);
-
- if (ret && !no_wait_gpu) {
- void *sync_obj;
-
- /*
- * Take a reference to the fence and unreserve,
- * at this point the buffer should be dead, so
- * no new sync objects can be attached.
- */
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
-
- ww_mutex_unlock(&bo->resv->lock);
- spin_unlock(&glob->lru_lock);
-
- ret = driver->sync_obj_wait(sync_obj, false, interruptible);
- driver->sync_obj_unref(&sync_obj);
- if (ret)
- return ret;
-
- /*
- * remove sync_obj with ttm_bo_wait, the wait should be
- * finished, and no new wait object should have been added.
- */
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, true);
- WARN_ON(ret);
- spin_unlock(&bdev->fence_lock);
- if (ret)
- return ret;
-
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
-
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- if (ret) {
- spin_unlock(&glob->lru_lock);
- return 0;
- }
- } else
- spin_unlock(&bdev->fence_lock);
-
- if (ret || unlikely(list_empty(&bo->ddestroy))) {
- ww_mutex_unlock(&bo->resv->lock);
- spin_unlock(&glob->lru_lock);
- return ret;
- }
+ struct ttm_buffer_object *bo;
- put_count = ttm_bo_del_from_lru(bo);
- list_del_init(&bo->ddestroy);
- ++put_count;
+ bo = container_of(work, typeof(*bo), delayed_delete);
- spin_unlock(&glob->lru_lock);
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+ dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
-
- ttm_bo_list_ref_sub(bo, put_count, true);
-
- return 0;
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
}
-/**
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
- */
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static void ttm_bo_release(struct kref *kref)
{
- struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry = NULL;
- int ret = 0;
-
- spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- goto out_unlock;
-
- entry = list_first_entry(&bdev->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&entry->list_kref);
+ struct ttm_buffer_object *bo =
+ container_of(kref, struct ttm_buffer_object, kref);
+ struct ttm_device *bdev = bo->bdev;
+ int ret;
- for (;;) {
- struct ttm_buffer_object *nentry = NULL;
+ WARN_ON_ONCE(bo->pin_count);
+ WARN_ON_ONCE(bo->bulk_move);
- if (entry->ddestroy.next != &bdev->ddestroy) {
- nentry = list_first_entry(&entry->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&nentry->list_kref);
+ if (!bo->deleted) {
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort, if we fail to allocate memory for the
+ * fences block for the BO to become idle
+ */
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ 30 * HZ);
}
- ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
- if (remove_all && ret) {
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_reserve_nolru(entry, false, false,
- false, 0);
- spin_lock(&glob->lru_lock);
- }
+ if (bo->bdev->funcs->release_notify)
+ bo->bdev->funcs->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+ ttm_mem_io_free(bdev, bo->resource);
+
+ if (!dma_resv_test_signaled(&bo->base._resv,
+ DMA_RESV_USAGE_BOOKKEEP) ||
+ (want_init_on_free() && (bo->ttm != NULL)) ||
+ bo->type == ttm_bo_type_sg ||
+ !dma_resv_trylock(bo->base.resv)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
+
+ spin_lock(&bo->bdev->lru_lock);
+
+ /*
+ * Make pinned bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ *
+ * FIXME: QXL is triggering this. Can be removed when the
+ * driver is fixed.
+ */
+ if (bo->pin_count) {
+ bo->pin_count = 0;
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
- if (!ret)
- ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
- !remove_all);
- else
- spin_unlock(&glob->lru_lock);
+ kref_init(&bo->kref);
+ spin_unlock(&bo->bdev->lru_lock);
- kref_put(&entry->list_kref, ttm_bo_release_list);
- entry = nentry;
+ INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
- if (ret || !entry)
- goto out;
+ /* Schedule the worker on the closest NUMA node. This
+ * improves performance since system memory might be
+ * cleared on free and that is best done on a CPU core
+ * close to it.
+ */
+ queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
+ return;
+ }
- spin_lock(&glob->lru_lock);
- if (list_empty(&entry->ddestroy))
- break;
+ ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
}
-out_unlock:
- spin_unlock(&glob->lru_lock);
-out:
- if (entry)
- kref_put(&entry->list_kref, ttm_bo_release_list);
- return ret;
+ atomic_dec(&ttm_glob.bo_count);
+ bo->destroy(bo);
}
-static void ttm_bo_delayed_workqueue(struct work_struct *work)
+/* TODO: remove! */
+void ttm_bo_put(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev =
- container_of(work, struct ttm_bo_device, wq.work);
-
- if (ttm_bo_delayed_delete(bdev, false)) {
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
- }
+ kref_put(&bo->kref, ttm_bo_release);
}
-static void ttm_bo_release(struct kref *kref)
+void ttm_bo_fini(struct ttm_buffer_object *bo)
{
- struct ttm_buffer_object *bo =
- container_of(kref, struct ttm_buffer_object, kref);
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
-
- write_lock(&bdev->vm_lock);
- if (likely(bo->vm_node != NULL)) {
- rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
- drm_mm_put_block(bo->vm_node);
- bo->vm_node = NULL;
- }
- write_unlock(&bdev->vm_lock);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
- ttm_bo_cleanup_refs_or_queue(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
}
+EXPORT_SYMBOL(ttm_bo_fini);
-void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_place *hop)
{
- struct ttm_buffer_object *bo = *p_bo;
-
- *p_bo = NULL;
- kref_put(&bo->kref, ttm_bo_release);
-}
-EXPORT_SYMBOL(ttm_bo_unref);
+ struct ttm_placement hop_placement;
+ struct ttm_resource *hop_mem;
+ int ret;
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
-{
- return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+ hop_placement.num_placement = 1;
+ hop_placement.placement = hop;
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
-{
- if (resched)
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ /* find space in the bounce domain */
+ ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
+ if (ret)
+ return ret;
+ /* move to the bounce domain */
+ ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
+ if (ret) {
+ ttm_resource_free(bo, &hop_mem);
+ return ret;
+ }
+ return 0;
}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_evict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_reg evict_mem;
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource *evict_mem;
struct ttm_placement placement;
+ struct ttm_place hop;
int ret = 0;
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
+ memset(&hop, 0, sizeof(hop));
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS) {
- pr_err("Failed to expire sync object before buffer eviction\n");
- }
- goto out;
- }
+ dma_resv_assert_held(bo->base.resv);
- lockdep_assert_held(&bo->resv->lock.base);
+ placement.num_placement = 0;
+ bdev->funcs->evict_flags(bo, &placement);
+
+ if (!placement.num_placement) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ return ret;
- evict_mem = bo->mem;
- evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved_vm = false;
- evict_mem.bus.io_reserved_count = 0;
+ /*
+ * Since we've already synced, this frees backing store
+ * immediately.
+ */
+ return ttm_bo_pipeline_gutting(bo);
+ }
- placement.fpfn = 0;
- placement.lpfn = 0;
- placement.num_placement = 0;
- placement.num_busy_placement = 0;
- bdev->driver->evict_flags(bo, &placement);
- ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -694,360 +394,500 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_gpu);
+ do {
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret != -EMULTIHOP)
+ break;
+
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ } while (!ret);
+
if (ret) {
- if (ret != -ERESTARTSYS)
+ ttm_resource_free(bo, &evict_mem);
+ if (ret != -ERESTARTSYS && ret != -EINTR)
pr_err("Buffer eviction failed\n");
- ttm_bo_mem_put(bo, &evict_mem);
- goto out;
}
- bo->evicted = true;
out:
return ret;
}
-static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- uint32_t mem_type,
- bool interruptible,
- bool no_wait_gpu)
+/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ struct ttm_resource *res = bo->resource;
+ struct ttm_device *bdev = bo->bdev;
+
+ dma_resv_assert_held(bo->base.resv);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ return ttm_resource_intersects(bdev, res, place, bo->base.size);
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
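/*
 * Illustrative sketch, not part of this patch: drivers may install their own
 * eviction_valuable hook in ttm_device_funcs and fall back to the default
 * above; my_bo_is_busy() is a hypothetical driver-private check.
 */
static bool my_eviction_valuable(struct ttm_buffer_object *bo,
				 const struct ttm_place *place)
{
	if (my_bo_is_busy(bo))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}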
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
+ *
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
+ */
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_global *glob = bdev->glob;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_resource_cursor cursor;
struct ttm_buffer_object *bo;
- int ret = -EBUSY, put_count;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- spin_lock(&glob->lru_lock);
- list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
- if (!ret)
- break;
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_cursor_init(&cursor, man);
+ res = ttm_resource_manager_first(&cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (ret) {
- spin_unlock(&glob->lru_lock);
- return ret;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
- kref_get(&bo->list_kref);
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
+ return ret;
+}
- if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
- no_wait_gpu);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- return ret;
- }
+/**
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
+ */
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+
+ /** @limit_pool: Which pool limit we should test against */
+ struct dmem_cgroup_pool_state *limit_pool;
+	/** @try_low: Whether we should attempt to evict BOs with a low watermark threshold */
+ bool try_low;
+ /** @hit_low: If we cannot evict a bo when @try_low is false (first pass) */
+ bool hit_low;
+};
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- BUG_ON(ret != 0);
+ if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
+ evict_walk->try_low, &evict_walk->hit_low))
+ return 0;
- ttm_bo_list_ref_sub(bo, put_count, true);
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
- ttm_bo_unreserve(bo);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
+ }
- kref_put(&bo->list_kref, ttm_bo_release_list);
- return ret;
-}
+ if (lret)
+ goto out;
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res, NULL);
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- if (mem->mm_node)
- (*man->func->put_node)(man, mem);
+ return lret;
}
-EXPORT_SYMBOL(ttm_bo_mem_put);
-/**
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- uint32_t mem_type,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state *limit_pool)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- int ret;
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ .limit_pool = limit_pool,
+ };
+ s64 lret;
+
+ evict_walk.walk.arg.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+
+ /* One more attempt if we hit low limit? */
+ if (!lret && evict_walk.hit_low) {
+ evict_walk.try_low = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ }
+ if (lret || !ticket)
+ goto out;
+ /* Reset low limit */
+ evict_walk.try_low = evict_walk.hit_low = false;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.arg.trylock_only = false;
+
+retry:
do {
- ret = (*man->func->get_node)(man, bo, placement, mem);
- if (unlikely(ret != 0))
- return ret;
- if (mem->mm_node)
- break;
- ret = ttm_mem_evict_first(bdev, mem_type,
- interruptible, no_wait_gpu);
- if (unlikely(ret != 0))
- return ret;
- } while (1);
- if (mem->mm_node == NULL)
- return -ENOMEM;
- mem->mem_type = mem_type;
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.arg.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+
+ /* We hit the low limit? Try once more */
+ if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
+ evict_walk.try_low = true;
+ goto retry;
+ }
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
return 0;
}
-static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
- uint32_t cur_placement,
- uint32_t proposed_placement)
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted any more during memory pressure.
+ * @bo must be unpinned again by calling ttm_bo_unpin().
+ */
+void ttm_bo_pin(struct ttm_buffer_object *bo)
{
- uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
- uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ if (!bo->pin_count++ && bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_pin);
- /**
- * Keep current caching if possible.
- */
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ if (WARN_ON_ONCE(!bo->pin_count))
+ return;
- if ((cur_placement & caching) != 0)
- result |= (cur_placement & caching);
- else if ((man->default_caching & caching) != 0)
- result |= man->default_caching;
- else if ((TTM_PL_FLAG_CACHED & caching) != 0)
- result |= TTM_PL_FLAG_CACHED;
- else if ((TTM_PL_FLAG_WC & caching) != 0)
- result |= TTM_PL_FLAG_WC;
- else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
- result |= TTM_PL_FLAG_UNCACHED;
-
- return result;
+ spin_lock(&bo->bdev->lru_lock);
+ if (!--bo->pin_count && bo->resource) {
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
+ spin_unlock(&bo->bdev->lru_lock);
}
+EXPORT_SYMBOL(ttm_bo_unpin);
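/*
 * Illustrative usage, not part of this patch: pin/unpin bracket a span during
 * which the BO must not be moved (e.g. while it is scanned out), with the
 * reservation lock held across each call.
 */
static int my_pin_for_scanout(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;
	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	return 0;
}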
-static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
- uint32_t mem_type,
- uint32_t proposed_placement,
- uint32_t *masked_placement)
+/*
+ * Add the pipelined eviction fencesto the BO as kernel dependency and reserve new
+ * fence slots.
+ */
+static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo,
+ struct ttm_resource_manager *man,
+ bool no_wait_gpu)
{
- uint32_t cur_flags = ttm_bo_type_flags(mem_type);
-
- if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
- return false;
+ struct dma_fence *fence;
+ int i;
- if ((proposed_placement & man->available_caching) == 0)
- return false;
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (!fence)
+ continue;
- cur_flags |= (proposed_placement & man->available_caching);
+ if (no_wait_gpu) {
+ if (!dma_fence_is_signaled(fence)) {
+ spin_unlock(&man->eviction_lock);
+ return -EBUSY;
+ }
+ } else {
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
+ }
+ }
+ spin_unlock(&man->eviction_lock);
- *masked_placement = cur_flags;
- return true;
+ /* TODO: this call should be removed. */
+ return dma_resv_reserve_fences(bo->base.resv, 1);
}
/**
- * Creates space for memory region @mem according to its type.
+ * ttm_bo_alloc_resource - Allocate backing store for a BO
+ *
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ * @force_space: If we should evict buffers to force space
+ * @res: The resulting struct ttm_resource.
*
- * This function first searches for free space in compatible memory types in
- * the priority order defined by the driver. If free space isn't found, then
- * ttm_bo_mem_force_space is attempted in priority order to evict and find
- * space.
+ * Allocates a resource for the buffer object pointed to by @bo, using the
+ * placement flags in @placement, potentially evicting other buffer objects when
+ * @force_space is true.
+ * This function may sleep while waiting for resources to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == true).
+ * -ENOSPC: Could not allocate space for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx,
+ bool force_space,
+ struct ttm_resource **res)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
- uint32_t mem_type = TTM_PL_SYSTEM;
- uint32_t cur_flags = 0;
- bool type_found = false;
- bool type_ok = false;
- bool has_erestartsys = false;
+ struct ttm_device *bdev = bo->bdev;
+ struct ww_acquire_ctx *ticket;
int i, ret;
- mem->mm_node = NULL;
- for (i = 0; i < placement->num_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
- &mem_type);
- if (ret)
- return ret;
- man = &bdev->man[mem_type];
+ ticket = dma_resv_locking_ctx(bo->base.resv);
+ ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES);
+ if (unlikely(ret))
+ return ret;
- type_ok = ttm_bo_mt_compatible(man,
- mem_type,
- placement->placement[i],
- &cur_flags);
+ for (i = 0; i < placement->num_placement; ++i) {
+ const struct ttm_place *place = &placement->placement[i];
+ struct dmem_cgroup_pool_state *limit_pool = NULL;
+ struct ttm_resource_manager *man;
+ bool may_evict;
- if (!type_ok)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, placement->placement[i],
- ~TTM_PL_MASK_MEMTYPE);
+ if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
- if (mem_type == TTM_PL_SYSTEM)
- break;
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
+ if (ret) {
+ if (ret != -ENOSPC && ret != -EAGAIN) {
+ dmem_cgroup_pool_state_put(limit_pool);
+ return ret;
+ }
+ if (!may_evict) {
+ dmem_cgroup_pool_state_put(limit_pool);
+ continue;
+ }
- if (man->has_type && man->use_type) {
- type_found = true;
- ret = (*man->func->get_node)(man, bo, placement, mem);
- if (unlikely(ret))
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res, limit_pool);
+ dmem_cgroup_pool_state_put(limit_pool);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
}
- if (mem->mm_node)
- break;
- }
-
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- return 0;
- }
- if (!type_found)
- return -EINVAL;
+ ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ ttm_resource_free(bo, res);
+ if (ret == -EBUSY)
+ continue;
- for (i = 0; i < placement->num_busy_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->busy_placement[i],
- &mem_type);
- if (ret)
return ret;
- man = &bdev->man[mem_type];
- if (!man->has_type)
- continue;
- if (!ttm_bo_mt_compatible(man,
- mem_type,
- placement->busy_placement[i],
- &cur_flags))
- continue;
-
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, placement->busy_placement[i],
- ~TTM_PL_MASK_MEMTYPE);
-
-
- if (mem_type == TTM_PL_SYSTEM) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- mem->mm_node = NULL;
- return 0;
}
-
- ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait_gpu);
- if (ret == 0 && mem->mm_node) {
- mem->placement = cur_flags;
- return 0;
- }
- if (ret == -ERESTARTSYS)
- has_erestartsys = true;
+ return 0;
}
- ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_mem_space);
-
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
-{
- int ret = 0;
- struct ttm_mem_reg mem;
- struct ttm_bo_device *bdev = bo->bdev;
-
- lockdep_assert_held(&bo->resv->lock.base);
- /*
- * FIXME: It's possible to pipeline buffer moves.
- * Have the driver move function wait for idle when necessary,
- * instead of doing it here.
- */
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
- if (ret)
- return ret;
- mem.num_pages = bo->num_pages;
- mem.size = mem.num_pages << PAGE_SHIFT;
- mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved_vm = false;
- mem.bus.io_reserved_count = 0;
- /*
- * Determine where to move the buffer.
- */
- ret = ttm_bo_mem_space(bo, placement, &mem,
- interruptible, no_wait_gpu);
- if (ret)
- goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false,
- interruptible, no_wait_gpu);
-out_unlock:
- if (ret && mem.mm_node)
- ttm_bo_mem_put(bo, &mem);
- return ret;
+ return -ENOSPC;
}
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem)
+/*
+ * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
+ *
+ * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
+ * @placement: Proposed new placement for the buffer object
+ * @res: The resulting struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Tries both idle allocation and forceful eviction of buffers. See
+ * ttm_bo_alloc_resource for details.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **res,
+ struct ttm_operation_ctx *ctx)
{
- int i;
+ bool force_space = false;
+ int ret;
- if (mem->mm_node && placement->lpfn != 0 &&
- (mem->start < placement->fpfn ||
- mem->start + mem->num_pages > placement->lpfn))
- return -1;
+ do {
+ ret = ttm_bo_alloc_resource(bo, placement, ctx,
+ force_space, res);
+ force_space = !force_space;
+ } while (ret == -ENOSPC && force_space);
- for (i = 0; i < placement->num_placement; i++) {
- if ((placement->placement[i] & mem->placement &
- TTM_PL_MASK_CACHING) &&
- (placement->placement[i] & mem->placement &
- TTM_PL_MASK_MEM))
- return i;
- }
- return -1;
+ return ret;
}
+EXPORT_SYMBOL(ttm_bo_mem_space);
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @ctx: validation parameters.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *res;
+ struct ttm_place hop;
+ bool force_space;
int ret;
- lockdep_assert_held(&bo->resv->lock.base);
- /* Check that range is valid */
- if (placement->lpfn || placement->fpfn)
- if (placement->fpfn > placement->lpfn ||
- (placement->lpfn - placement->fpfn) < bo->num_pages)
- return -EINVAL;
+ dma_resv_assert_held(bo->base.resv);
+
/*
- * Check whether we need to move buffer.
+ * Remove the backing store if no placement is given.
*/
- ret = ttm_bo_mem_compat(placement, &bo->mem);
- if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible,
- no_wait_gpu);
- if (ret)
- return ret;
- } else {
+ if (!placement->num_placement)
+ return ttm_bo_pipeline_gutting(bo);
+
+ force_space = false;
+ do {
+ /* Check whether we need to move buffer. */
+ if (bo->resource &&
+ ttm_resource_compatible(bo->resource, placement,
+ force_space))
+ return 0;
+
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
/*
- * Use the access and other non-mapping-related flag bits from
- * the compatible memory placement flags to the active flags
+ * Determine where to move the buffer.
+ *
+	 * If the driver determines that the move needs an
+	 * extra step, it returns -EMULTIHOP; the buffer is
+	 * then first moved to the temporary stop, and the
+	 * driver is called again to make the second hop.
*/
- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
- ~TTM_PL_MASK_MEMTYPE);
- }
+ ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
+ &res);
+ force_space = !force_space;
+ if (ret == -ENOSPC)
+ continue;
+ if (ret)
+ return ret;
+
+bounce:
+ ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
+ if (ret == -EMULTIHOP) {
+ ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
+ /* try and move to final place now. */
+ if (!ret)
+ goto bounce;
+ }
+ if (ret) {
+ ttm_resource_free(bo, &res);
+ return ret;
+ }
+
+ } while (ret && force_space);
+
+ /* For backward compatibility with userspace */
+ if (ret == -ENOSPC)
+ return bo->bdev->alloc_flags & TTM_ALLOCATION_PROPAGATE_ENOSPC ?
+ ret : -ENOMEM;
+
/*
* We might need to add a TTM.
*/
- if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- ret = ttm_bo_add_ttm(bo, true);
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
+ ret = ttm_tt_create(bo, true);
if (ret)
return ret;
}
@@ -1055,718 +895,409 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- BUG_ON((placement->fpfn || placement->lpfn) &&
- (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
- return 0;
-}
-
-int ttm_bo_init(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct file *persistent_swap_storage,
- size_t acc_size,
- struct sg_table *sg,
- void (*destroy) (struct ttm_buffer_object *))
+/**
+ * ttm_bo_init_reserved
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function, enables driver-specific objects
+ * derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref is
+ * usually set to 1, but note that in some situations, other tasks may
+ * already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *))
{
- int ret = 0;
- unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
- bool locked;
-
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (ret) {
- pr_err("Out of kernel memory\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- return -ENOMEM;
- }
-
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (num_pages == 0) {
- pr_err("Illegal buffer object size\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- ttm_mem_global_free(mem_glob, acc_size);
- return -EINVAL;
- }
- bo->destroy = destroy;
+ int ret;
kref_init(&bo->kref);
- kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
- INIT_LIST_HEAD(&bo->lru);
- INIT_LIST_HEAD(&bo->ddestroy);
- INIT_LIST_HEAD(&bo->swap);
- INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
- bo->glob = bdev->glob;
bo->type = type;
- bo->num_pages = num_pages;
- bo->mem.size = num_pages << PAGE_SHIFT;
- bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->mem.num_pages = bo->num_pages;
- bo->mem.mm_node = NULL;
- bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved_vm = false;
- bo->mem.bus.io_reserved_count = 0;
- bo->priv_flags = 0;
- bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
- bo->persistent_swap_storage = persistent_swap_storage;
- bo->acc_size = acc_size;
+ bo->page_alignment = alignment;
+ bo->destroy = destroy;
+ bo->pin_count = 0;
bo->sg = sg;
- bo->resv = &bo->ttm_resv;
- reservation_object_init(bo->resv);
- atomic_inc(&bo->glob->bo_count);
-
- ret = ttm_bo_check_placement(bo, placement);
+ bo->bulk_move = NULL;
+ if (resv)
+ bo->base.resv = resv;
+ else
+ bo->base.resv = &bo->base._resv;
+ atomic_inc(&ttm_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (likely(!ret) &&
- (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg))
- ret = ttm_bo_setup_vm(bo);
-
- locked = ww_mutex_trylock(&bo->resv->lock);
- WARN_ON(!locked);
-
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
+ ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
+ PFN_UP(bo->base.size));
+ if (ret)
+ goto err_put;
+ }
- ttm_bo_unreserve(bo);
+ /* passed reservation objects should already be locked,
+ * since otherwise lockdep will be angered in radeon.
+ */
+ if (!resv)
+ WARN_ON(!dma_resv_trylock(bo->base.resv));
+ else
+ dma_resv_assert_held(resv);
+ ret = ttm_bo_validate(bo, placement, ctx);
if (unlikely(ret))
- ttm_bo_unref(&bo);
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_init);
-
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
- size += ttm_round_pot(sizeof(struct ttm_tt));
- return size;
-}
-EXPORT_SYMBOL(ttm_bo_acc_size);
-
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
- size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
- size += ttm_round_pot(sizeof(struct ttm_dma_tt));
- return size;
-}
-EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-
-int ttm_bo_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct file *persistent_swap_storage,
- struct ttm_buffer_object **p_bo)
-{
- struct ttm_buffer_object *bo;
- size_t acc_size;
- int ret;
+ goto err_unlock;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (unlikely(bo == NULL))
- return -ENOMEM;
+ return 0;
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- interruptible, persistent_swap_storage, acc_size,
- NULL, NULL);
- if (likely(ret == 0))
- *p_bo = bo;
+err_unlock:
+ if (!resv)
+ dma_resv_unlock(bo->base.resv);
+err_put:
+ ttm_bo_put(bo);
return ret;
}
-EXPORT_SYMBOL(ttm_bo_create);
+EXPORT_SYMBOL(ttm_bo_init_reserved);
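+
+/*
+ * Usage sketch (editorial illustration, not part of this patch): embedding
+ * the base object in a larger driver structure, as the kernel-doc above
+ * describes. "struct my_bo", "my_bo_destroy" and "placement" are
+ * hypothetical driver definitions, and the embedded GEM object's size is
+ * assumed to be set up by the caller.
+ *
+ *	struct my_bo {
+ *		struct ttm_buffer_object base;
+ *		...driver-private state...
+ *	};
+ *
+ *	struct ttm_operation_ctx ctx = { .interruptible = true };
+ *	struct my_bo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ *	int err;
+ *
+ *	if (!obj)
+ *		return -ENOMEM;
+ *	obj->base.base.size = size;
+ *	err = ttm_bo_init_reserved(bdev, &obj->base, ttm_bo_type_device,
+ *				   &placement, 1, &ctx, NULL, NULL,
+ *				   my_bo_destroy);
+ *	if (err)
+ *		return err;	(my_bo_destroy() has already freed obj)
+ *	ttm_bo_unreserve(&obj->base);
+ */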
-static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- unsigned mem_type, bool allow_errors)
+/**
+ * ttm_bo_init_validate
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources, sleep
+ * interruptibly.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref is
+ * usually set to 1, but note that in some situations, other tasks may
+ * already be holding references to @bo as well.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *))
{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&glob->lru_lock);
- while (!list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
- if (ret) {
- if (allow_errors) {
- return ret;
- } else {
- pr_err("Cleanup eviction failed\n");
- }
- }
- spin_lock(&glob->lru_lock);
- }
- spin_unlock(&glob->lru_lock);
- return 0;
-}
-
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
- struct ttm_mem_type_manager *man;
- int ret = -EINVAL;
-
- if (mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory type %d\n", mem_type);
- return ret;
- }
- man = &bdev->man[mem_type];
-
- if (!man->has_type) {
- pr_err("Trying to take down uninitialized memory manager type %u\n",
- mem_type);
+ ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
+ sg, resv, destroy);
+ if (ret)
return ret;
- }
-
- man->use_type = false;
- man->has_type = false;
- ret = 0;
- if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, mem_type, false);
+ if (!resv)
+ ttm_bo_unreserve(bo);
- ret = (*man->func->takedown)(man);
- }
-
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(ttm_bo_clean_mm);
-
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+EXPORT_SYMBOL(ttm_bo_init_validate);
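+
+/*
+ * Editorial note: when @resv is NULL, the call above is the convenience
+ * form of ttm_bo_init_reserved() followed by ttm_bo_unreserve(); a typical
+ * call, reusing the hypothetical names from the sketch above:
+ *
+ *	err = ttm_bo_init_validate(bdev, &obj->base, ttm_bo_type_device,
+ *				   &placement, 1, true, NULL, NULL,
+ *				   my_bo_destroy);
+ *	if (err)
+ *		return err;	(the bo is already destroyed on failure)
+ */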
- if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory manager memory type %u\n", mem_type);
- return -EINVAL;
- }
+/*
+ * buffer object vm functions.
+ */
- if (!man->has_type) {
- pr_err("Memory type %u has not been initialized\n", mem_type);
- return 0;
- }
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_device *bdev = bo->bdev;
- return ttm_bo_force_list_clean(bdev, mem_type, true);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+ ttm_mem_io_free(bdev, bo->resource);
}
-EXPORT_SYMBOL(ttm_bo_evict_mm);
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_size)
+/**
+ * ttm_bo_wait_ctx - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @ctx: defines how to wait
+ *
+ * Waits for the buffer to be idle. The timeout used depends on the context.
+ * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
+ * signal or zero on success.
+ */
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
- int ret = -EINVAL;
- struct ttm_mem_type_manager *man;
-
- BUG_ON(type >= TTM_NUM_MEM_TYPES);
- man = &bdev->man[type];
- BUG_ON(man->has_type);
- man->io_reserve_fastpath = true;
- man->use_io_reserve_lru = false;
- mutex_init(&man->io_reserve_mutex);
- INIT_LIST_HEAD(&man->io_reserve_lru);
-
- ret = bdev->driver->init_mem_type(bdev, type, man);
- if (ret)
- return ret;
- man->bdev = bdev;
+ long ret;
- ret = 0;
- if (type != TTM_PL_SYSTEM) {
- ret = (*man->func->init)(man, p_size);
- if (ret)
- return ret;
+ if (ctx->no_wait_gpu) {
+ if (dma_resv_test_signaled(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ return 0;
+ else
+ return -EBUSY;
}
- man->has_type = true;
- man->use_type = true;
- man->size = p_size;
-
- INIT_LIST_HEAD(&man->lru);
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ ctx->interruptible, 15 * HZ);
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret == 0))
+ return -EBUSY;
return 0;
}
-EXPORT_SYMBOL(ttm_bo_init_mm);
+EXPORT_SYMBOL(ttm_bo_wait_ctx);
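+
+/*
+ * Usage sketch (editorial illustration): first poll for idle without
+ * blocking, then fall back to an interruptible wait.
+ *
+ *	struct ttm_operation_ctx nowait = { .no_wait_gpu = true };
+ *	struct ttm_operation_ctx wait = { .interruptible = true };
+ *	int err;
+ *
+ *	if (ttm_bo_wait_ctx(bo, &nowait) == -EBUSY) {
+ *		err = ttm_bo_wait_ctx(bo, &wait);
+ *		if (err)
+ *			return err;	(-ERESTARTSYS, or -EBUSY after 15s)
+ *	}
+ */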
-static void ttm_bo_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_bo_global *glob =
- container_of(kobj, struct ttm_bo_global, kobj);
-
- ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
- __free_page(glob->dummy_read_page);
- kfree(glob);
-}
-
-void ttm_bo_global_release(struct drm_global_reference *ref)
-{
- struct ttm_bo_global *glob = ref->object;
-
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
-}
-EXPORT_SYMBOL(ttm_bo_global_release);
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+	/** @hit_low: Whether we should attempt to swap BOs with a low watermark threshold */
+	/** @evict_low: Whether to evict a BO that could not be swapped while @hit_low was false (first pass) */
+ bool hit_low, evict_low;
+};
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_bo_global_ref *bo_ref =
- container_of(ref, struct ttm_bo_global_ref, ref);
- struct ttm_bo_global *glob = ref->object;
- int ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
+ s64 ret;
- mutex_init(&glob->device_list_mutex);
- spin_lock_init(&glob->lru_lock);
- glob->mem_glob = bo_ref->mem_glob;
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ /*
+ * While the bo may already reside in SYSTEM placement, set
+	 * SYSTEM as the new placement to also cover the move further below.
+ * The driver may use the fact that we're moving from SYSTEM
+ * as an indication that we're about to swap out.
+ */
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
- if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out_no_drp;
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+ bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
- INIT_LIST_HEAD(&glob->swap_lru);
- INIT_LIST_HEAD(&glob->device_list);
+ if (bo->deleted) {
+ pgoff_t num_pages = bo->ttm->num_pages;
- ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
- ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
- if (unlikely(ret != 0)) {
- pr_err("Could not register buffer object swapout\n");
- goto out_no_shrink;
- }
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
- atomic_set(&glob->bo_count, 0);
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
- ret = kobject_init_and_add(
- &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
- if (unlikely(ret != 0))
- kobject_put(&glob->kobj);
- return ret;
-out_no_shrink:
- __free_page(glob->dummy_read_page);
-out_no_drp:
- kfree(glob);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_global_init);
+ /*
+ * Move to system cached
+ */
+ if (bo->resource->mem_type != TTM_PL_SYSTEM) {
+ struct ttm_resource *evict_mem;
+ struct ttm_place hop;
+ memset(&hop, 0, sizeof(hop));
+ place.mem_type = TTM_PL_SYSTEM;
+ ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
+ if (ret)
+ goto out;
-int ttm_bo_device_release(struct ttm_bo_device *bdev)
-{
- int ret = 0;
- unsigned i = TTM_NUM_MEM_TYPES;
- struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
-
- while (i--) {
- man = &bdev->man[i];
- if (man->has_type) {
- man->use_type = false;
- if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
- ret = -EBUSY;
- pr_err("DRM memory manager type %d is not clean\n",
- i);
- }
- man->has_type = false;
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
+ ttm_resource_free(bo, &evict_mem);
+ goto out;
}
}
- mutex_lock(&glob->device_list_mutex);
- list_del(&bdev->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ /*
+ * Make sure BO is idle.
+ */
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
- cancel_delayed_work_sync(&bdev->wq);
+ ttm_bo_unmap_virtual(bo);
+ if (bo->bdev->funcs->swap_notify)
+ bo->bdev->funcs->swap_notify(bo);
- while (ttm_bo_delayed_delete(bdev, true))
- ;
+ if (ttm_tt_is_populated(bo->ttm)) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
- spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- TTM_DEBUG("Delayed destroy list was clean\n");
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
- if (list_empty(&bdev->man[0].lru))
- TTM_DEBUG("Swap list was clean\n");
- spin_unlock(&glob->lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
+ if (ret)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
+out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
return ret;
}
-EXPORT_SYMBOL(ttm_bo_device_release);
-
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_global *glob,
- struct ttm_bo_driver *driver,
- uint64_t file_page_offset,
- bool need_dma32)
-{
- int ret = -EINVAL;
-
- rwlock_init(&bdev->vm_lock);
- bdev->driver = driver;
-
- memset(bdev->man, 0, sizeof(bdev->man));
- /*
- * Initialize the system memory buffer type.
- * Other types need to be driver / IOCTL initialized.
- */
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
- if (unlikely(ret != 0))
- goto out_no_sys;
-
- bdev->addr_space_rb = RB_ROOT;
- drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
- INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- INIT_LIST_HEAD(&bdev->ddestroy);
- bdev->dev_mapping = NULL;
- bdev->glob = glob;
- bdev->need_dma32 = need_dma32;
- bdev->val_seq = 0;
- spin_lock_init(&bdev->fence_lock);
- mutex_lock(&glob->device_list_mutex);
- list_add_tail(&bdev->device_list, &glob->device_list);
- mutex_unlock(&glob->device_list_mutex);
-
- return 0;
-out_no_sys:
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_device_init);
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
-/*
- * buffer object vm functions.
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of bytes to swap out.
+ *
+ * Return: The number of bytes actually swapped out, or negative error code
+ * on error.
*/
-
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
- if (mem->mem_type == TTM_PL_SYSTEM)
- return false;
-
- if (man->flags & TTM_MEMTYPE_FLAG_CMA)
- return false;
-
- if (mem->placement & TTM_PL_FLAG_CACHED)
- return false;
- }
- return true;
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
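+
+/*
+ * Usage sketch (editorial illustration): a shrinker-like path swapping out
+ * up to "target" bytes from the system-memory manager of a device; "bdev"
+ * and "target" are assumed to be provided by the caller.
+ *
+ *	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
+ *	struct ttm_resource_manager *man =
+ *		ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ *	s64 swapped;
+ *
+ *	swapped = ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, target);
+ *	if (swapped < 0)
+ *		return swapped;
+ */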
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- loff_t offset = (loff_t) bo->addr_space_offset;
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-
- if (!bdev->dev_mapping)
+ if (bo->ttm == NULL)
return;
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
- ttm_mem_io_free_vm(bo);
-}
-
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
-
- ttm_mem_io_lock(man, false);
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(man);
-}
-
-EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct rb_node **cur = &bdev->addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct ttm_buffer_object *cur_bo;
- unsigned long offset = bo->vm_node->start;
- unsigned long cur_offset;
-
- while (*cur) {
- parent = *cur;
- cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
- cur_offset = cur_bo->vm_node->start;
- if (offset < cur_offset)
- cur = &parent->rb_left;
- else if (offset > cur_offset)
- cur = &parent->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&bo->vm_rb, parent, cur);
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ ttm_tt_destroy(bo->bdev, bo->ttm);
+ bo->ttm = NULL;
}
/**
- * ttm_bo_setup_vm:
+ * ttm_bo_populate() - Ensure that a buffer object has backing pages
+ * @bo: The buffer object
+ * @ctx: The ttm_operation_ctx governing the operation.
*
- * @bo: the buffer to allocate address space for
+ * For buffer objects in a memory type whose manager uses
+ * struct ttm_tt for backing pages, ensure those backing pages
+ * are present and with valid content. The bo's resource is also
+ * placed on the correct LRU list if it was previously swapped
+ * out.
*
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
+ * Return: 0 if successful, negative error code on failure.
+ * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
+ * is set to true.
*/
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_tt *tt = bo->ttm;
+ bool swapped;
int ret;
-retry_pre_get:
- ret = drm_mm_pre_get(&bdev->addr_space_mm);
- if (unlikely(ret != 0))
- return ret;
-
- write_lock(&bdev->vm_lock);
- bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
- bo->mem.num_pages, 0, 0);
+ dma_resv_assert_held(bo->base.resv);
- if (unlikely(bo->vm_node == NULL)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
- bo->mem.num_pages, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- write_unlock(&bdev->vm_lock);
- goto retry_pre_get;
- }
-
- ttm_bo_vm_insert_rb(bo);
- write_unlock(&bdev->vm_lock);
- bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
- return 0;
-out_unlock:
- write_unlock(&bdev->vm_lock);
- return ret;
-}
-
-int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool lazy, bool interruptible, bool no_wait)
-{
- struct ttm_bo_driver *driver = bo->bdev->driver;
- struct ttm_bo_device *bdev = bo->bdev;
- void *sync_obj;
- int ret = 0;
-
- if (likely(bo->sync_obj == NULL))
+ if (!tt)
return 0;
- while (bo->sync_obj) {
-
- if (driver->sync_obj_signaled(bo->sync_obj)) {
- void *tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bdev->fence_lock);
- continue;
- }
-
- if (no_wait)
- return -EBUSY;
+ swapped = ttm_tt_is_swapped(tt);
+ ret = ttm_tt_populate(bo->bdev, tt, ctx);
+ if (ret)
+ return ret;
- sync_obj = driver->sync_obj_ref(bo->sync_obj);
- spin_unlock(&bdev->fence_lock);
- ret = driver->sync_obj_wait(sync_obj,
- lazy, interruptible);
- if (unlikely(ret != 0)) {
- driver->sync_obj_unref(&sync_obj);
- spin_lock(&bdev->fence_lock);
- return ret;
- }
- spin_lock(&bdev->fence_lock);
- if (likely(bo->sync_obj == sync_obj)) {
- void *tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- clear_bit(TTM_BO_PRIV_FLAG_MOVING,
- &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&sync_obj);
- driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bdev->fence_lock);
- } else {
- spin_unlock(&bdev->fence_lock);
- driver->sync_obj_unref(&sync_obj);
- spin_lock(&bdev->fence_lock);
- }
+ if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
+ bo->resource) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
}
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_wait);
-
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
- if (unlikely(ret != 0))
- return ret;
- spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, true, no_wait);
- spin_unlock(&bdev->fence_lock);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+EXPORT_SYMBOL(ttm_bo_populate);
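+
+/*
+ * Usage sketch (editorial illustration): ensuring backing pages are present
+ * before touching a bo's contents from the CPU; the caller must already
+ * hold the bo's reservation.
+ *
+ *	struct ttm_operation_ctx ctx = { .interruptible = true };
+ *	int err;
+ *
+ *	dma_resv_assert_held(bo->base.resv);
+ *	err = ttm_bo_populate(bo, &ctx);
+ *	if (err)
+ *		return err;	(may be -EINTR or -ERESTARTSYS here)
+ */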
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
-/**
- * A buffer object shrink method that tries to swap out the first
- * buffer object on the bo_global::swap_lru list.
- */
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
-{
- struct ttm_bo_global *glob =
- container_of(shrink, struct ttm_bo_global, shrink);
- struct ttm_buffer_object *bo;
- int ret = -EBUSY;
- int put_count;
- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
-
- spin_lock(&glob->lru_lock);
- list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
- if (!ret)
- break;
- }
-
- if (ret) {
- spin_unlock(&glob->lru_lock);
- return ret;
- }
-
- kref_get(&bo->list_kref);
+ int ret;
- if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret != 0)
return ret;
- }
-
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
- ttm_bo_list_ref_sub(bo, put_count, true);
-
- /**
- * Wait for GPU, then move to system cached.
- */
-
- spin_lock(&bo->bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->bdev->fence_lock);
-
- if (unlikely(ret != 0))
- goto out;
-
- if ((bo->mem.placement & swap_placement) != swap_placement) {
- struct ttm_mem_reg evict_mem;
-
- evict_mem = bo->mem;
- evict_mem.mm_node = NULL;
- evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
- evict_mem.mem_type = TTM_PL_SYSTEM;
-
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
- if (unlikely(ret != 0))
- goto out;
- }
-
- ttm_bo_unmap_virtual(bo);
-
- /**
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
-
- if (bo->bdev->driver->swap_notify)
- bo->bdev->driver->swap_notify(bo);
-
- ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
-out:
-
- /**
- *
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
-
- ww_mutex_unlock(&bo->resv->lock);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ret = ttm_bo_populate(bo, ctx);
+ ttm_bo_unreserve(bo);
return ret;
}
-
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
-{
- while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
- ;
-}
-EXPORT_SYMBOL(ttm_bo_swapout_all);
+EXPORT_SYMBOL(ttm_bo_setup_export);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..e0d48eac74b0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
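+
+/*
+ * Usage sketch (editorial illustration): taking a reference during a lookup
+ * that can race with object teardown; lookup_bo(), "dev" and "handle" are
+ * hypothetical, caller-provided pieces of an RCU-safe lookup.
+ *
+ *	rcu_read_lock();
+ *	bo = lookup_bo(dev, handle);
+ *	if (bo)
+ *		bo = ttm_bo_get_unless_zero(bo);
+ *	rcu_read_unlock();
+ *	if (!bo)
+ *		return -ENOENT;
+ */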
+
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
deleted file mode 100644
index e4367f91472a..000000000000
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-
-/**
- * Currently we use a spinlock for the lock, but a mutex *may* be
- * more appropriate to reduce scheduling latency if the range manager
- * ends up with very fragmented allocation patterns.
- */
-
-struct ttm_range_manager {
- struct drm_mm mm;
- spinlock_t lock;
-};
-
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem)
-{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node = NULL;
- unsigned long lpfn;
- int ret;
-
- lpfn = placement->lpfn;
- if (!lpfn)
- lpfn = man->size;
- do {
- ret = drm_mm_pre_get(mm);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&rman->lock);
- node = drm_mm_search_free_in_range(mm,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(node == NULL)) {
- spin_unlock(&rman->lock);
- return 0;
- }
- node = drm_mm_get_block_atomic_range(node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&rman->lock);
- } while (node == NULL);
-
- mem->mm_node = node;
- mem->start = node->start;
- return 0;
-}
-
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
-{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
-
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_put_block(mem->mm_node);
- spin_unlock(&rman->lock);
- mem->mm_node = NULL;
- }
-}
-
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
-{
- struct ttm_range_manager *rman;
-
- rman = kzalloc(sizeof(*rman), GFP_KERNEL);
- if (!rman)
- return -ENOMEM;
-
- drm_mm_init(&rman->mm, 0, p_size);
- spin_lock_init(&rman->lock);
- man->priv = rman;
- return 0;
-}
-
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
-{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- struct drm_mm *mm = &rman->mm;
-
- spin_lock(&rman->lock);
- if (drm_mm_clean(mm)) {
- drm_mm_takedown(mm);
- spin_unlock(&rman->lock);
- kfree(rman);
- man->priv = NULL;
- return 0;
- }
- spin_unlock(&rman->lock);
- return -EBUSY;
-}
-
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
-{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
-
- spin_lock(&rman->lock);
- drm_mm_debug_table(&rman->mm, prefix);
- spin_unlock(&rman->lock);
-}
-
-const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
- ttm_bo_man_init,
- ttm_bo_man_takedown,
- ttm_bo_man_get_node,
- ttm_bo_man_put_node,
- ttm_bo_man_debug
-};
-EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5b..2ff35d55e462 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -28,388 +29,187 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/swap.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
-
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
- ttm_bo_mem_put(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
- struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
- int ret;
-
- if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ttm_tt_unbind(ttm);
- ttm_bo_free_old_node(bo);
- ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
- TTM_PL_MASK_MEM);
- old_mem->mem_type = TTM_PL_SYSTEM;
- }
- ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
- if (unlikely(ret != 0))
- return ret;
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
- if (new_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(ttm, new_mem);
- if (unlikely(ret != 0))
- return ret;
- }
+#include <drm/drm_cache.h>
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+#include "ttm_bo_internal.h"
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
+struct ttm_transfer_obj {
+ struct ttm_buffer_object base;
+ struct ttm_buffer_object *bo;
+};
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
- if (likely(man->io_reserve_fastpath))
+ if (mem->bus.offset || mem->bus.addr)
return 0;
- if (interruptible)
- return mutex_lock_interruptible(&man->io_reserve_mutex);
+ mem->bus.is_iomem = false;
+ if (!bdev->funcs->io_mem_reserve)
+ return 0;
- mutex_lock(&man->io_reserve_mutex);
- return 0;
+ return bdev->funcs->io_mem_reserve(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
- if (likely(man->io_reserve_fastpath))
+ if (!mem)
return;
- mutex_unlock(&man->io_reserve_mutex);
-}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
- struct ttm_buffer_object *bo;
-
- if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
- return -EAGAIN;
-
- bo = list_first_entry(&man->io_reserve_lru,
- struct ttm_buffer_object,
- io_reserve_lru);
- list_del_init(&bo->io_reserve_lru);
- ttm_bo_unmap_virtual_locked(bo);
-
- return 0;
-}
-
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret = 0;
-
- if (!bdev->driver->io_mem_reserve)
- return 0;
- if (likely(man->io_reserve_fastpath))
- return bdev->driver->io_mem_reserve(bdev, mem);
-
- if (bdev->driver->io_mem_reserve &&
- mem->bus.io_reserved_count++ == 0) {
-retry:
- ret = bdev->driver->io_mem_reserve(bdev, mem);
- if (ret == -EAGAIN) {
- ret = ttm_mem_io_evict(man);
- if (ret == 0)
- goto retry;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
-
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- if (likely(man->io_reserve_fastpath))
+ if (!mem->bus.offset && !mem->bus.addr)
return;
- if (bdev->driver->io_mem_reserve &&
- --mem->bus.io_reserved_count == 0 &&
- bdev->driver->io_mem_free)
- bdev->driver->io_mem_free(bdev, mem);
+ if (bdev->funcs->io_mem_free)
+ bdev->funcs->io_mem_free(bdev, mem);
-}
-EXPORT_SYMBOL(ttm_mem_io_free);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- int ret;
-
- if (!mem->bus.io_reserved_vm) {
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[mem->mem_type];
-
- ret = ttm_mem_io_reserve(bo->bdev, mem);
- if (unlikely(ret != 0))
- return ret;
- mem->bus.io_reserved_vm = true;
- if (man->use_io_reserve_lru)
- list_add_tail(&bo->io_reserve_lru,
- &man->io_reserve_lru);
- }
- return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
-
- if (mem->bus.io_reserved_vm) {
- mem->bus.io_reserved_vm = false;
- list_del_init(&bo->io_reserve_lru);
- ttm_mem_io_free(bo->bdev, mem);
- }
+ mem->bus.offset = 0;
+ mem->bus.addr = NULL;
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void **virtual)
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @clear: Whether to clear rather than copy.
+ * @num_pages: Number of pages of the operation.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to eventually be able to run asynchronously
+ * under a dma-fence, if desired.
+ */
+void ttm_move_memcpy(bool clear,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret;
- void *addr;
+ const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+ const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+ struct iosys_map src_map, dst_map;
+ pgoff_t i;
- *virtual = NULL;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bdev, mem);
- ttm_mem_io_unlock(man);
- if (ret || !mem->bus.is_iomem)
- return ret;
+ /* Single TTM move. NOP */
+ if (dst_ops->maps_tt && src_ops->maps_tt)
+ return;
- if (mem->bus.addr) {
- addr = mem->bus.addr;
- } else {
- if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
- else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
- if (!addr) {
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
- return -ENOMEM;
+ /* Don't move nonexistent data. Clear destination instead. */
+ if (clear) {
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ if (dst_map.is_iomem)
+ memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+ else
+ memset(dst_map.vaddr, 0, PAGE_SIZE);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
}
+ return;
}
- *virtual = addr;
- return 0;
-}
-
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void *virtual)
-{
- struct ttm_mem_type_manager *man;
-
- man = &bdev->man[mem->mem_type];
-
- if (virtual && mem->bus.addr == NULL)
- iounmap(virtual);
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
- uint32_t *dstP =
- (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
- uint32_t *srcP =
- (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-
- int i;
- for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
- iowrite32(ioread32(srcP++), dstP++);
- return 0;
-}
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *d = ttm->pages[page];
- void *dst;
-
- if (!d)
- return -ENOMEM;
- src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ src_ops->map_local(src_iter, &src_map, i);
-#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- dst = vmap(&d, 1, 0, prot);
- else
- dst = kmap(d);
-#endif
- if (!dst)
- return -ENOMEM;
-
- memcpy_fromio(dst, src, PAGE_SIZE);
-
-#ifdef CONFIG_X86
- kunmap_atomic(dst);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(dst);
- else
- kunmap(d);
-#endif
-
- return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *s = ttm->pages[page];
- void *src;
+ drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
- if (!s)
- return -ENOMEM;
-
- dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- src = vmap(&s, 1, 0, prot);
- else
- src = kmap(s);
-#endif
- if (!src)
- return -ENOMEM;
-
- memcpy_toio(dst, src, PAGE_SIZE);
-
-#ifdef CONFIG_X86
- kunmap_atomic(src);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(src);
- else
- kunmap(s);
-#endif
-
- return 0;
+ if (src_ops->unmap_local)
+ src_ops->unmap_local(src_iter, &src_map);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
+ }
}
+EXPORT_SYMBOL(ttm_move_memcpy);
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ctx: operation context
+ * @dst_mem: struct ttm_resource indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful, free any old aperture space and
+ * update the buffer object's resource. If unsuccessful, the old data
+ * remains untouched, and it's up to the caller to free the memory space
+ * indicated by @dst_mem.
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *dst_mem)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy = *old_mem;
- void *old_iomap;
- void *new_iomap;
- int ret;
- unsigned long i;
- unsigned long page;
- unsigned long add = 0;
- int dir;
+ struct ttm_resource *src_mem = bo->resource;
+ struct ttm_resource_manager *src_man;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_linear_io io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+ bool clear;
+ int ret = 0;
- ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
- if (ret)
- return ret;
- ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
- if (ret)
- goto out;
+ if (WARN_ON(!src_mem))
+ return -EINVAL;
- if (old_iomap == NULL && new_iomap == NULL)
- goto out2;
- if (old_iomap == NULL && ttm == NULL)
- goto out2;
+ src_man = ttm_manager_type(bdev, src_mem->mem_type);
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
+ dst_man->use_tt)) {
+ ret = ttm_bo_populate(bo, ctx);
+ if (ret)
+ return ret;
+ }
- if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret) {
- /* if we fail here don't nuke the mm node
- * as the bo still owns it */
- old_copy.mm_node = NULL;
- goto out1;
- }
+ dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+ if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+ dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+ if (IS_ERR(dst_iter))
+ return PTR_ERR(dst_iter);
+
+ src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+ if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+ src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+ if (IS_ERR(src_iter)) {
+ ret = PTR_ERR(src_iter);
+ goto out_src_iter;
}
- add = 0;
- dir = 1;
+ clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->start < old_mem->start + old_mem->size)) {
- dir = -1;
- add = new_mem->num_pages - 1;
- }
+ if (!src_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
- for (i = 0; i < new_mem->num_pages; ++i) {
- page = i * dir + add;
- if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(old_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
- prot);
- } else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(new_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
- prot);
- } else
- ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret) {
- /* failing here, means keep old copy as-is */
- old_copy.mm_node = NULL;
- goto out1;
- }
- }
- mb();
-out2:
- old_copy = *old_mem;
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
- ttm_tt_unbind(ttm);
- ttm_tt_destroy(ttm);
- bo->ttm = NULL;
- }
+out_src_iter:
+ if (!dst_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
-out1:
- ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
-out:
- ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
- ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
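+
+/*
+ * Usage sketch (editorial illustration): a driver without a copy engine can
+ * simply forward its move callback here; "my_bo_move" is a hypothetical
+ * ttm_device_funcs::move implementation.
+ *
+ *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
+ *			      struct ttm_operation_ctx *ctx,
+ *			      struct ttm_resource *new_mem,
+ *			      struct ttm_place *hop)
+ *	{
+ *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
+ *	}
+ */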
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
- kfree(bo);
+ struct ttm_transfer_obj *fbo;
+
+ fbo = container_of(bo, struct ttm_transfer_obj, base);
+ dma_resv_fini(&fbo->base.base._resv);
+ ttm_bo_put(fbo->bo);
+ kfree(fbo);
}
/**
@@ -430,74 +230,84 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
- struct ttm_buffer_object *fbo;
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_transfer_obj *fbo;
int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
- *fbo = *bo;
+ fbo->base = *bo;
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
- INIT_LIST_HEAD(&fbo->ddestroy);
- INIT_LIST_HEAD(&fbo->lru);
- INIT_LIST_HEAD(&fbo->swap);
- INIT_LIST_HEAD(&fbo->io_reserve_lru);
- fbo->vm_node = NULL;
- atomic_set(&fbo->cpu_writers, 0);
+ atomic_inc(&ttm_glob.bo_count);
+ drm_vma_node_reset(&fbo->base.base.vma_node);
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj)
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
- else
- fbo->sync_obj = NULL;
- spin_unlock(&bdev->fence_lock);
- kref_init(&fbo->list_kref);
- kref_init(&fbo->kref);
- fbo->destroy = &ttm_transfered_destroy;
- fbo->acc_size = 0;
- fbo->resv = &fbo->ttm_resv;
- reservation_object_init(fbo->resv);
- ret = ww_mutex_trylock(&fbo->resv->lock);
+ kref_init(&fbo->base.kref);
+ fbo->base.destroy = &ttm_transfered_destroy;
+ fbo->base.pin_count = 0;
+ if (bo->type != ttm_bo_type_sg)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ fbo->base.base.dev = NULL;
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
- *new_obj = fbo;
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
+ if (ret) {
+ dma_resv_unlock(&fbo->base.base._resv);
+ kfree(fbo);
+ return ret;
+ }
+
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
+ ttm_bo_get(bo);
+ fbo->bo = bo;
+
+ ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
+ *new_obj = &fbo->base;
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+/**
+ * ttm_io_prot
+ *
+ * @bo: ttm buffer object
+ * @res: ttm resource object
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @res.
+ */
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp)
{
-#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else if (boot_cpu_data.x86 > 3)
- tmp = pgprot_noncached(tmp);
-
-#elif defined(__powerpc__)
- if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
- pgprot_val(tmp) |= _PAGE_NO_CACHE;
- if (caching_flags & TTM_PL_FLAG_UNCACHED)
- pgprot_val(tmp) |= _PAGE_GUARDED;
+ struct ttm_resource_manager *man;
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
}
-#endif
-#if defined(__ia64__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__) || defined(__mips__)
- if (!(caching_flags & TTM_PL_FLAG_CACHED))
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
+
+ return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
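+
+/*
+ * Usage sketch (editorial illustration): deriving the page protection for a
+ * driver-private vmap of a bo's pages, mirroring what ttm_bo_kmap_ttm()
+ * below does; "npages" is an assumed caller-provided page count.
+ *
+ *	pgprot_t prot = ttm_io_prot(bo, bo->resource, PAGE_KERNEL);
+ *	void *vaddr = vmap(bo->ttm->pages, npages, 0, prot);
+ */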
@@ -506,19 +316,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
- if (bo->mem.bus.addr) {
+ if (bo->resource->bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
} else {
+ resource_size_t res = bo->resource->bus.offset + offset;
+
map->bo_kmap_type = ttm_bo_map_iomap;
- if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ if (mem->bus.caching == ttm_write_combined)
+ map->virtual = ioremap_wc(res, size);
+#ifdef CONFIG_X86
+ else if (mem->bus.caching == ttm_cached)
+ map->virtual = ioremap_cache(res, size);
+#endif
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap(res, size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -528,19 +342,25 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+ struct ttm_resource *mem = bo->resource;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ pgprot_t prot;
int ret;
BUG_ON(!ttm);
- if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
- return ret;
- }
+ ret = ttm_bo_populate(bo, &ctx);
+ if (ret)
+ return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -554,9 +374,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL :
- ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -564,32 +382,66 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return (!map->virtual) ? -ENOMEM : 0;
}
+/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only after making
+ * sure the bo is the one being displayed, so it is properly allocated and
+ * protected.
+ *
+ * Returns the vaddr, which can be used to write to the bo and which should
+ * be passed to kunmap_local() when done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
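
A hedged sketch of the intended call site, a drm_panic scanout path; fb_bo, byte_offset, pixels and len are placeholders, not TTM names:

	unsigned long page = byte_offset >> PAGE_SHIFT;
	void *vaddr = ttm_bo_kmap_try_from_panic(fb_bo, page);

	if (vaddr) {
		/* Stay within this one page, then drop the local mapping. */
		memcpy(vaddr + offset_in_page(byte_offset), pixels, len);
		kunmap_local(vaddr);
	}
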
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
- BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->num_pages)
+ if (num_pages > PFN_UP(bo->resource->size))
return -EINVAL;
- if (start_page > bo->num_pages)
+ if ((start_page + num_pages) > PFN_UP(bo->resource->size))
return -EINVAL;
-#if 0
- if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
- return -EPERM;
-#endif
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
- ttm_mem_io_unlock(man);
+
+ ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
if (ret)
return ret;
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
@@ -599,12 +451,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_kmap);
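
A short, hedged usage sketch (bo assumed reserved and idle for CPU access; src and size are placeholders):

	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->resource->size), &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (!is_iomem)	/* an iomem mapping would need memcpy_toio() instead */
		memcpy(virtual, src, size);
	ttm_bo_kunmap(&map);
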
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
- struct ttm_buffer_object *bo = map->bo;
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
-
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
@@ -622,85 +477,702 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
- ttm_mem_io_unlock(man);
+ ttm_mem_io_free(map->bo->bdev, map->bo->resource);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- void *sync_obj,
- bool evict,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+/**
+ * ttm_bo_vmap
+ *
+ * @bo: The buffer object.
+ * @map: pointer to a struct iosys_map representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap or vmap to the
+ * data in the buffer object. The parameter @map returns the virtual
+ * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
int ret;
- struct ttm_buffer_object *ghost_obj;
- void *tmp_obj = NULL;
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj) {
- tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- }
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
- if (evict) {
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
+ dma_resv_assert_held(bo->base.resv);
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
+ if (ret)
+ return ret;
+
+ if (mem->bus.is_iomem) {
+ void __iomem *vaddr_iomem;
+
+ if (mem->bus.addr)
+ vaddr_iomem = (void __iomem *)mem->bus.addr;
+ else if (mem->bus.caching == ttm_write_combined)
+ vaddr_iomem = ioremap_wc(mem->bus.offset,
+ bo->base.size);
+#ifdef CONFIG_X86
+ else if (mem->bus.caching == ttm_cached)
+ vaddr_iomem = ioremap_cache(mem->bus.offset,
+ bo->base.size);
+#endif
+ else
+ vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
+
+ if (!vaddr_iomem)
+ return -ENOMEM;
+
+ iosys_map_set_vaddr_iomem(map, vaddr_iomem);
+
+ } else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
+ void *vaddr;
+
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- (bo->ttm != NULL)) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
+ */
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+ vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
+ if (!vaddr)
+ return -ENOMEM;
+
+ iosys_map_set_vaddr(map, vaddr);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vmap);
+
+/**
+ * ttm_bo_vunmap
+ *
+ * @bo: The buffer object.
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_vmap().
+ */
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
+{
+ struct ttm_resource *mem = bo->resource;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (iosys_map_is_null(map))
+ return;
+
+ if (!map->is_iomem)
+ vunmap(map->vaddr);
+ else if (!mem->bus.addr)
+ iounmap(map->vaddr_iomem);
+ iosys_map_clear(map);
+
+ ttm_mem_io_free(bo->bdev, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_vunmap);
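
Since both helpers assert the reservation, a typical (hedged) pairing looks like this; src and size are placeholders:

	struct iosys_map map;
	int ret;

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_bo_vmap(bo, &map);
	if (!ret) {
		/* iosys_map copes with both iomem and system-memory backings. */
		iosys_map_memcpy_to(&map, 0, src, size);
		ttm_bo_vunmap(bo, &map);
	}
	dma_resv_unlock(bo->base.resv);
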
+
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+ bool dst_use_tt)
+{
+ long ret;
+
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ if (ret == 0)
+ return -EBUSY;
+ if (ret < 0)
+ return ret;
+
+ if (!dst_use_tt)
+ ttm_bo_tt_destroy(bo);
+ ttm_resource_free(bo, &bo->resource);
+ return 0;
+}
+
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool dst_use_tt)
+{
+ struct ttm_buffer_object *ghost_obj;
+ int ret;
+
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+	 * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (dst_use_tt)
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ dma_resv_unlock(&ghost_obj->base._resv);
+ ttm_bo_put(ghost_obj);
+ return 0;
+}
+
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+ struct dma_fence *fence)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from;
+ struct dma_fence *tmp;
+ int i;
+
+ from = ttm_manager_type(bdev, bo->resource->mem_type);
+
+ /**
+ * BO doesn't have a TTM we need to bind/unbind. Just remember
+ * this eviction and free up the allocation.
+ * The fence will be saved in the first free slot or in the slot
+ * already used to store a fence from the same context. Since
+ * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for
+	 * evictions, we should always find a slot to use.
+ */
+ spin_lock(&from->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ tmp = from->eviction_fences[i];
+ if (!tmp)
+ break;
+ if (fence->context != tmp->context)
+ continue;
+ if (dma_fence_is_later(fence, tmp)) {
+ dma_fence_put(tmp);
+ break;
}
- ttm_bo_free_old_node(bo);
+ goto unlock;
+ }
+ if (i < TTM_NUM_MOVE_FENCES) {
+ from->eviction_fences[i] = dma_fence_get(fence);
} else {
- /**
- * This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
+ WARN(1, "not enough fence slots for all fence contexts");
+ spin_unlock(&from->eviction_lock);
+ dma_fence_wait(fence, false);
+ goto end;
+ }
+
+unlock:
+ spin_unlock(&from->eviction_lock);
+end:
+ ttm_resource_free(bo, &bo->resource);
+}
+
+/**
+ * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret = 0;
+
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
+ if (!evict)
+ ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+ else if (!from->use_tt && pipeline)
+ ttm_bo_move_pipeline_evict(bo, fence);
+ else
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+
+ if (ret)
+ return ret;
+
+ ttm_bo_assign_mem(bo, new_mem);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
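
A sketch of the expected calling pattern from a driver's move() hook; my_copy_submit() is an assumed driver function that schedules the blit and returns its fence:

static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct dma_fence *fence = my_copy_submit(bo, new_mem);
	int ret;

	if (IS_ERR(fence))
		return PTR_ERR(fence);

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
					true /* pipeline */, new_mem);
	dma_fence_put(fence);	/* the resv objects hold their own refs */
	return ret;
}
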
+
+/**
+ * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret;
+
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+ if (WARN_ON(ret))
+ return;
+
+ ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptibly
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
+{
+ struct ttm_buffer_object *ghost;
+ struct ttm_tt *ttm;
+ int ret;
+
+ /* If already idle, no need for ghost object dance. */
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ if (!bo->ttm) {
+ /* See comment below about clearing. */
+ ret = ttm_tt_create(bo, true);
+ if (ret)
+ return ret;
+ } else {
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ if (bo->type == ttm_bo_type_device)
+ ttm_tt_mark_for_clear(bo->ttm);
+ }
+ ttm_resource_free(bo, &bo->resource);
+ return 0;
+ }
+
+ /*
+ * We need an unpopulated ttm_tt after giving our current one,
+ * if any, to the ghost object. And we can't afford to fail
+ * creating one *after* the operation. If the bo subsequently gets
+ * resurrected, make sure it's cleared (if ttm_bo_type_device)
+ * to avoid leaking sensitive information to user-space.
+ */
+
+ ttm = bo->ttm;
+ bo->ttm = NULL;
+ ret = ttm_tt_create(bo, true);
+ swap(bo->ttm, ttm);
+ if (ret)
+ return ret;
+
+ ret = ttm_buffer_object_transfer(bo, &ghost);
+ if (ret)
+ goto error_destroy_tt;
+
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
+ /* Last resort, wait for the BO to be idle when we are OOM */
+ if (ret) {
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ }
+
+ dma_resv_unlock(&ghost->base._resv);
+ ttm_bo_put(ghost);
+ bo->ttm = ttm;
+ return 0;
+
+error_destroy_tt:
+ ttm_tt_destroy(bo->bdev, ttm);
+ return ret;
+}
+
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ curs->needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
+ else
+ ret = dma_resv_lock(resv, arg->ticket);
+
+ if (!ret) {
+ curs->needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+		 * after waiting for the ticketlock, revert to
+ * trylocking for this walk.
*/
+ arg->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
- set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
+ return ret;
+}
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
- if (ret)
- return ret;
+/**
+ * ttm_lru_walk_for_evict() - Perform an LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describes the walk and the actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource
+ * encountered, the corresponding ttm_buffer_object is locked and a reference
+ * is taken on it before the LRU lock is dropped. The LRU lock may be dropped
+ * before locking and, in that case, it's verified that the item actually
+ * remains on the LRU list after the lock, and that the buffer object didn't
+ * switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is up to the caller; it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that, given the way dma_resv individualization is done, locking needs
+ * to be done either with the LRU lock held (trylocking only) or with a
+ * reference held on the object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
+ s64 progress = 0;
+ s64 lret;
+
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return progress;
+}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
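
A minimal, hedged sketch of hooking into the walk; my_swapout_bo() is an assumed helper returning the number of pages released or a negative error:

static s64 my_process_bo(struct ttm_lru_walk *walk,
			 struct ttm_buffer_object *bo)
{
	return my_swapout_bo(bo, walk->arg.ctx);
}

static const struct ttm_lru_walk_ops my_walk_ops = {
	.process_bo = my_process_bo,
};

static s64 my_shrink_manager(struct ttm_device *bdev,
			     struct ttm_resource_manager *man,
			     struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &my_walk_ops,
		.arg = { .ctx = ctx },
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}
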
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
+ *
+ * Initialize a struct ttm_bo_lru_cursor.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->arg = arg;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
- /**
- * If we're not moving to fixed memory, the TTM object
- * needs to stay alive. Otherwhise hang it on the ghost
- * bo to be unbound and destroyed.
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
*/
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ghost_obj->ttm = NULL;
- else
- bo->ttm = NULL;
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
- ttm_bo_unreserve(ghost_obj);
- ttm_bo_unref(&ghost_obj);
+ spin_lock(lru_lock);
}
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
- return 0;
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ return __ttm_bo_lru_cursor_next(curs);
}
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
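
A hedged loop sketch using the guarded iterator already seen in ttm_lru_walk_for_evict(); man and ctx are assumed in scope, and trylock_only keeps the walk non-blocking:

	struct ttm_lru_walk_arg arg = { .ctx = ctx, .trylock_only = true };
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;
	unsigned long count = 0;

	ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, bo)
		count++;	/* bo is locked and referenced inside the body */

	if (IS_ERR(bo))
		return PTR_ERR(bo);
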
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system, it's first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable. false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular gpu waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * shrinking something else that is less time-consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
+}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
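
Putting the three helpers together, a hedged sketch of one driver shrinker step; the flag choices are illustrative:

static long my_shrink_one(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	const struct ttm_bo_shrink_flags flags = {
		.purge = false,
		.writeback = true,
		.allow_move = true,
	};

	if (!ttm_bo_shrink_suitable(bo, &ctx))
		return 0;

	return ttm_bo_shrink(&ctx, bo, flags);
}
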
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041c..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -30,436 +31,479 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <linux/mm.h>
-#include <linux/rbtree.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
+#include <linux/export.h>
-#define TTM_BO_VM_NUM_PREFAULT 16
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
- unsigned long page_start,
- unsigned long num_pages)
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
{
- struct rb_node *cur = bdev->addr_space_rb.rb_node;
- unsigned long cur_offset;
- struct ttm_buffer_object *bo;
- struct ttm_buffer_object *best_bo = NULL;
-
- while (likely(cur != NULL)) {
- bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
- cur_offset = bo->vm_node->start;
- if (page_start >= cur_offset) {
- cur = cur->rb_right;
- best_bo = bo;
- if (page_start == cur_offset)
- break;
- } else
- cur = cur->rb_left;
- }
+ long err = 0;
- if (unlikely(best_bo == NULL))
- return NULL;
+ /*
+ * Quick non-stalling check for idle.
+ */
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+ return 0;
- if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
- (page_start + num_pages)))
- return NULL;
+ /*
+ * If possible, avoid waiting for GPU with mmap_lock
+ * held. We only do this if the fault allows retry and this
+ * is the first attempt.
+ */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ return VM_FAULT_RETRY;
+
+ drm_gem_object_get(&bo->base);
+ mmap_read_unlock(vmf->vma->vm_mm);
+ (void)dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ dma_resv_unlock(bo->base.resv);
+ drm_gem_object_put(&bo->base);
+ return VM_FAULT_RETRY;
+ }
- return best_bo;
+ /*
+ * Ordinary wait.
+ */
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(err < 0)) {
+ return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ VM_FAULT_NOPAGE;
+ }
+
+ return 0;
}
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int ret;
- int i;
- unsigned long address = (unsigned long)vmf->virtual_address;
- int retval = VM_FAULT_NOPAGE;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
+ struct ttm_device *bdev = bo->bdev;
+ if (bdev->funcs->io_mem_pfn)
+ return bdev->funcs->io_mem_pfn(bo, page_offset);
+
+ return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
+}
+
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space concurrent
+ * access to map() and unmap() completely unrelated buffer objects. TTM buffer
+ * object reservations sometimes wait for GPU and should therefore be
+ * considered long waits. This function reserves the buffer object interruptibly
+ * taking this into account. Starvation is avoided by the vm system not
+ * allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ * 0 on success and the bo was reserved.
+ * VM_FAULT_RETRY if blocking wait.
+ * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
+{
/*
* Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after scheduling.
+ * between mmap_lock and bo_reserve: Perform a trylock operation
+ * for reserve, and if it fails, retry the fault after waiting
+ * for the buffer to become unreserved.
*/
+ if (unlikely(!dma_resv_trylock(bo->base.resv))) {
+ /*
+ * If the fault allows retry and this is the first
+ * fault attempt, we try to release the mmap_lock
+ * before waiting
+ */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ drm_gem_object_get(&bo->base);
+ mmap_read_unlock(vmf->vma->vm_mm);
+ if (!dma_resv_lock_interruptible(bo->base.resv,
+ NULL))
+ dma_resv_unlock(bo->base.resv);
+ drm_gem_object_put(&bo->base);
+ }
+
+ return VM_FAULT_RETRY;
+ }
- ret = ttm_bo_reserve(bo, true, true, false, 0);
- if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
- return VM_FAULT_NOPAGE;
+ if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+ return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify) {
- ret = bdev->driver->fault_reserve_notify(bo);
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- set_need_resched();
- case -ERESTARTSYS:
- retval = VM_FAULT_NOPAGE;
- goto out_unlock;
- default:
- retval = VM_FAULT_SIGBUS;
- goto out_unlock;
+ /*
+ * Refuse to fault imported pages. This should be handled
+ * (if at all) by redirecting mmap to the exporter.
+ */
+ if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
}
}
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ * VM_FAULT_NOPAGE on success or pending signal
+ * VM_FAULT_SIGBUS on unspecified error
+ * VM_FAULT_OOM on out-of-memory
+ * VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct ttm_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ pgoff_t i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+
/*
* Wait for buffer data in transit, due to a pipelined
* move.
*/
+ ret = ttm_bo_vm_fault_idle(bo, vmf);
+ if (unlikely(ret != 0))
+ return ret;
- spin_lock(&bdev->fence_lock);
- if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
- ret = ttm_bo_wait(bo, false, true, false);
- spin_unlock(&bdev->fence_lock);
- if (unlikely(ret != 0)) {
- retval = (ret != -ERESTARTSYS) ?
- VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
- goto out_unlock;
- }
- } else
- spin_unlock(&bdev->fence_lock);
-
- ret = ttm_mem_io_lock(man, true);
- if (unlikely(ret != 0)) {
- retval = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
- ret = ttm_mem_io_reserve_vm(bo);
- if (unlikely(ret != 0)) {
- retval = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ err = ttm_mem_io_reserve(bdev, bo->resource);
+ if (unlikely(err != 0))
+ return VM_FAULT_SIGBUS;
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
- page_last = vma_pages(vma) +
- bo->vm_node->start - vma->vm_pgoff;
+ vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
- retval = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ if (unlikely(page_offset >= PFN_UP(bo->base.size)))
+ return VM_FAULT_SIGBUS;
+
+ prot = ttm_io_prot(bo, bo->resource, prot);
+ if (!bo->resource->bus.is_iomem) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
- /*
- * Strictly, we're not allowed to modify vma->vm_page_prot here,
- * since the mmap_sem is only held in read mode. However, we
- * modify only the caching bits of vma->vm_page_prot and
- * consider those bits protected by
- * the bo->mutex, as we should be the only writers.
- * There shouldn't really be any readers of these bits except
- * within vm_insert_mixed()? fork?
- *
- * TODO: Add a list of vmas to the bo, and change the
- * vma->vm_page_prot when the object changes caching policy, with
- * the correct locks held.
- */
- if (bo->mem.bus.is_iomem) {
- vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
- vma->vm_page_prot);
- } else {
ttm = bo->ttm;
- vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
- vm_get_page_prot(vma->vm_flags) :
- ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
- retval = VM_FAULT_OOM;
- goto out_io_unlock;
+ err = ttm_bo_populate(bo, &ctx);
+ if (err) {
+ if (err == -EINTR || err == -ERESTARTSYS ||
+ err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+
+ pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
+ return VM_FAULT_SIGBUS;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ prot = pgprot_decrypted(prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
- if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
- else {
+ for (i = 0; i < num_prefault; ++i) {
+ if (bo->resource->bus.is_iomem) {
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+ } else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
- retval = VM_FAULT_OOM;
- goto out_io_unlock;
+ return VM_FAULT_OOM;
} else if (unlikely(!page)) {
break;
}
pfn = page_to_pfn(page);
}
- ret = vm_insert_mixed(vma, address, pfn);
/*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
+ * Note that the value of @prot at this point may differ from
+ * the value of @vma->vm_page_prot in the caching- and
+ * encryption bits. This is because the exact location of the
+ * data may not be known at mmap() time and may also change
+ * at arbitrary times while the data is mmap'ed.
+ * See vmf_insert_pfn_prot() for a discussion.
*/
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
- break;
- else if (unlikely(ret != 0)) {
- retval =
- (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- goto out_io_unlock;
+ /* Never error on prefaulted PTEs */
+ if (unlikely((ret & VM_FAULT_ERROR))) {
+ if (i == 0)
+ return VM_FAULT_NOPAGE;
+ else
+ break;
}
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
break;
}
-out_io_unlock:
- ttm_mem_io_unlock(man);
-out_unlock:
- ttm_bo_unreserve(bo);
- return retval;
+ return ret;
}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct page *dummy_page = (struct page *)res;
- (void)ttm_bo_reference(bo);
+ __free_page(dummy_page);
}
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address;
+ unsigned long pfn;
+ struct page *page;
- ttm_bo_unref(&bo);
- vma->vm_private_data = NULL;
-}
+	/* Allocate a new dummy page to map the whole VA range of this VMA to it */
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
-static const struct vm_operations_struct ttm_bo_vm_ops = {
- .fault = ttm_bo_vm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
-};
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
+ return VM_FAULT_OOM;
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev)
-{
- struct ttm_bo_driver *driver;
- struct ttm_buffer_object *bo;
- int ret;
+ pfn = page_to_pfn(page);
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
- vma_pages(vma));
- if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
- bo = NULL;
- read_unlock(&bdev->vm_lock);
+ /* Prefault the entire VMA range right away to avoid further faults */
+ for (address = vma->vm_start; address < vma->vm_end;
+ address += PAGE_SIZE)
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
- if (unlikely(bo == NULL)) {
- pr_err("Could not find buffer object to map\n");
- return -EINVAL;
- }
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
+
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
+ vm_fault_t ret;
+ int idx;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
- driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
- ret = -EPERM;
- goto out_unref;
+ prot = vma->vm_page_prot;
+ if (drm_dev_enter(ddev, &idx)) {
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, prot);
}
- ret = driver->verify_access(bo, filp);
- if (unlikely(ret != 0))
- goto out_unref;
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
- vma->vm_ops = &ttm_bo_vm_ops;
+ dma_resv_unlock(bo->base.resv);
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
-out_unref:
- ttm_bo_unref(&bo);
return ret;
}
-EXPORT_SYMBOL(ttm_bo_mmap);
+EXPORT_SYMBOL(ttm_bo_vm_fault);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+void ttm_bo_vm_open(struct vm_area_struct *vma)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
- return 0;
+ WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+
+ drm_gem_object_get(&bo->base);
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_vm_open);
+void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf, size_t count,
- loff_t *f_pos, bool write)
+ drm_gem_object_put(&bo->base);
+ vma->vm_private_data = NULL;
+}
+EXPORT_SYMBOL(ttm_bo_vm_close);
+
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ uint8_t *buf, int len, int write)
{
- struct ttm_buffer_object *bo;
- struct ttm_bo_driver *driver;
- struct ttm_bo_kmap_obj map;
- unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long bytes_left = len;
int ret;
- bool no_wait = false;
- bool dummy;
-
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL))
- return -EFAULT;
-
- driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
- ret = -EPERM;
- goto out_unref;
- }
-
- ret = driver->verify_access(bo, filp);
- if (unlikely(ret != 0))
- goto out_unref;
- kmap_offset = dev_offset - bo->vm_node->start;
- if (unlikely(kmap_offset >= bo->num_pages)) {
- ret = -EFBIG;
- goto out_unref;
- }
+ /* Copy a page at a time, that way no extra virtual address
+ * mapping is needed
+ */
+ offset -= page << PAGE_SHIFT;
+ do {
+ unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+ struct ttm_bo_kmap_obj map;
+ void *ptr;
+ bool is_iomem;
+
+ ret = ttm_bo_kmap(bo, page, 1, &map);
+ if (ret)
+ return ret;
+
+ ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+ WARN_ON_ONCE(is_iomem);
+ if (write)
+ memcpy(ptr, buf, bytes);
+ else
+ memcpy(buf, ptr, bytes);
+ ttm_bo_kunmap(&map);
+
+ page++;
+ buf += bytes;
+ bytes_left -= bytes;
+ offset = 0;
+ } while (bytes_left);
+
+ return len;
+}
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when buffer object cannot
+ * be easily mapped (non-contiguous, non-visible, etc.). Should not directly
+ * be exported to user space via a peek / poke interface.
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write)
+{
+ int ret;
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
+ if (len < 1 || (offset + len) > bo->base.size)
+ return -EIO;
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (ret)
+ return ret;
- switch (ret) {
- case 0:
+ switch (bo->resource->mem_type) {
+ case TTM_PL_SYSTEM:
+ fallthrough;
+ case TTM_PL_TT:
+ ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
break;
- case -EBUSY:
- ret = -EAGAIN;
- goto out_unref;
default:
- goto out_unref;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- goto out_unref;
+ if (bo->bdev->funcs->access_memory)
+ ret = bo->bdev->funcs->access_memory
+ (bo, offset, buf, len, write);
+ else
+ ret = -EIO;
}
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return -EFBIG;
- *f_pos += io_size;
-
- return io_size;
-out_unref:
- ttm_bo_unref(&bo);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_access);
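
A hedged example of the intended use, e.g. reading a word from a driver debugfs dumper; m, bo and offset are placeholders:

	u32 val;
	int err = ttm_bo_access(bo, offset, &val, sizeof(val), false);

	if (err == sizeof(val))
		seq_printf(m, "%#010x\n", val);
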
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
- char __user *rbuf, size_t count, loff_t *f_pos,
- bool write)
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- kmap_offset = (*f_pos >> PAGE_SHIFT);
- if (unlikely(kmap_offset >= bo->num_pages))
- return -EFBIG;
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- return -EAGAIN;
- default:
- return ret;
- }
+ return ttm_bo_access(bo, offset, buf, len, write);
+}
+EXPORT_SYMBOL(ttm_bo_vm_access);
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- return ret;
- }
+static const struct vm_operations_struct ttm_bo_vm_ops = {
+ .fault = ttm_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access,
+};
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
+/**
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space.
+ *
+ * Maps a buffer object.
+ */
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+	/* Enforce no COW since it would have really strange behavior. */
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
+ drm_gem_object_get(&bo->base);
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+ /*
+ * Drivers may want to override the vm_ops field. Otherwise we
+ * use TTM's default callbacks.
+ */
+ if (!vma->vm_ops)
+ vma->vm_ops = &ttm_bo_vm_ops;
- if (unlikely(ret != 0))
- return ret;
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
- *f_pos += io_size;
+ vma->vm_private_data = bo;
- return io_size;
+ vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
+ return 0;
}
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
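
A sketch of how a GEM-based driver would wire this up (essentially what drm_gem_ttm_mmap() does); the extra put compensates for the reference ttm_bo_mmap_obj() takes on top of the one DRM core already holds:

static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = container_of(obj, typeof(*bo), base);
	int ret;

	ret = ttm_bo_mmap_obj(vma, bo);
	if (ret < 0)
		return ret;

	/* TTM took its own bo reference above; drop the gem one. */
	drm_gem_object_put(obj);
	return 0;
}
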
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
new file mode 100644
index 000000000000..9a51afaf0749
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#define pr_fmt(fmt) "[TTM DEVICE] " fmt
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_allocation.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_module.h"
+#include "ttm_bo_internal.h"
+
+/*
+ * ttm_global_mutex - protecting the global state
+ */
+static DEFINE_MUTEX(ttm_global_mutex);
+static unsigned ttm_glob_use_count;
+struct ttm_global ttm_glob;
+EXPORT_SYMBOL(ttm_glob);
+
+struct dentry *ttm_debugfs_root;
+
+static void ttm_global_release(void)
+{
+ struct ttm_global *glob = &ttm_glob;
+
+ mutex_lock(&ttm_global_mutex);
+ if (--ttm_glob_use_count > 0)
+ goto out;
+
+ ttm_pool_mgr_fini();
+ debugfs_remove(ttm_debugfs_root);
+
+ __free_page(glob->dummy_read_page);
+ memset(glob, 0, sizeof(*glob));
+out:
+ mutex_unlock(&ttm_global_mutex);
+}
+
+static int ttm_global_init(void)
+{
+ struct ttm_global *glob = &ttm_glob;
+ unsigned long num_pages, num_dma32;
+ struct sysinfo si;
+ int ret = 0;
+
+ mutex_lock(&ttm_global_mutex);
+ if (++ttm_glob_use_count > 1)
+ goto out;
+
+ si_meminfo(&si);
+
+ ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+	if (IS_ERR(ttm_debugfs_root))
+		ttm_debugfs_root = NULL;
+
+ /* Limit the number of pages in the pool to about 50% of the total
+ * system memory.
+ */
+ num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
+ num_pages /= 2;
+
+	/* But for DMA32 we limit ourselves to only use 2GiB maximum. */
+ num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
+ >> PAGE_SHIFT;
+ num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
+
+ ttm_pool_mgr_init(num_pages);
+ ttm_tt_mgr_init(num_pages, num_dma32);
+
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+ __GFP_NOWARN);
+
+	/* Retry without GFP_DMA32 on platforms where DMA32 is not available */
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ glob->dummy_read_page = alloc_page(__GFP_ZERO);
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
+ }
+
+ INIT_LIST_HEAD(&glob->device_list);
+ atomic_set(&glob->bo_count, 0);
+
+ debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
+ &glob->bo_count);
+out:
+ if (ret && ttm_debugfs_root)
+ debugfs_remove(ttm_debugfs_root);
+ if (ret)
+ --ttm_glob_use_count;
+ mutex_unlock(&ttm_global_mutex);
+ return ret;
+}
+
+/**
+ * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
+ *
+ * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
+ *
+ * Return: 0 on success, negative number on failure.
+ */
+int ttm_device_prepare_hibernation(struct ttm_device *bdev)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ int ret;
+
+ do {
+ ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
+ } while (ret > 0);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_device_prepare_hibernation);
+
+/*
+ * A buffer object shrink method that tries to swap out the first
+ * swappable buffer object found on the registered devices' LRU lists.
+ */
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
+{
+ struct ttm_global *glob = &ttm_glob;
+ struct ttm_device *bdev;
+ int ret = 0;
+
+ mutex_lock(&ttm_global_mutex);
+ list_for_each_entry(bdev, &glob->device_list, device_list) {
+ ret = ttm_device_swapout(bdev, ctx, gfp_flags);
+ if (ret > 0) {
+ list_move_tail(&bdev->device_list, &glob->device_list);
+ break;
+ }
+ }
+ mutex_unlock(&ttm_global_mutex);
+ return ret;
+}
+
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags)
+{
+ struct ttm_resource_manager *man;
+ unsigned i;
+ s64 lret;
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_device_swapout);
+
+/**
+ * ttm_device_init
+ *
+ * @bdev: A pointer to a struct ttm_device to initialize.
+ * @funcs: Function table for the device.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
+ * @mapping: The address space to use for this bo.
+ * @vma_manager: A pointer to a vma manager.
+ * @alloc_flags: TTM_ALLOCATION_* flags.
+ *
+ * Initializes a struct ttm_device:
+ * Returns:
+ * !0: Failure.
+ */
+int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ unsigned int alloc_flags)
+{
+ struct ttm_global *glob = &ttm_glob;
+ int ret, nid;
+
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
+ ret = ttm_global_init();
+ if (ret)
+ return ret;
+
+ bdev->wq = alloc_workqueue("ttm",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
+ if (!bdev->wq) {
+ ttm_global_release();
+ return -ENOMEM;
+ }
+
+ bdev->alloc_flags = alloc_flags;
+ bdev->funcs = funcs;
+
+ ttm_sys_man_init(bdev);
+
+ if (dev)
+ nid = dev_to_node(dev);
+ else
+ nid = NUMA_NO_NODE;
+
+ ttm_pool_init(&bdev->pool, dev, nid, alloc_flags);
+
+ bdev->vma_manager = vma_manager;
+ spin_lock_init(&bdev->lru_lock);
+ INIT_LIST_HEAD(&bdev->unevictable);
+ bdev->dev_mapping = mapping;
+ mutex_lock(&ttm_global_mutex);
+ list_add_tail(&bdev->device_list, &glob->device_list);
+ mutex_unlock(&ttm_global_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_device_init);
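
A hedged bring-up sketch from a driver probe path; mydev and my_ttm_funcs are assumptions, and 0 selects the default TTM_ALLOCATION_* behaviour:

	ret = ttm_device_init(&mydev->bdev, &my_ttm_funcs, drm->dev,
			      drm->anon_inode->i_mapping,
			      drm->vma_offset_manager, 0);
	if (ret)
		return ret;

	/* On removal, tear down in reverse: ttm_device_fini(&mydev->bdev); */
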
+
+void ttm_device_fini(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ unsigned i;
+
+ mutex_lock(&ttm_global_mutex);
+ list_del(&bdev->device_list);
+ mutex_unlock(&ttm_global_mutex);
+
+ drain_workqueue(bdev->wq);
+ destroy_workqueue(bdev->wq);
+
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
+ spin_lock(&bdev->lru_lock);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+		if (list_empty(&man->lru[i]))
+ pr_debug("Swap list %d was clean\n", i);
+ spin_unlock(&bdev->lru_lock);
+
+ ttm_pool_fini(&bdev->pool);
+ ttm_global_release();
+}
+EXPORT_SYMBOL(ttm_device_fini);
+
+static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
+ struct list_head *list)
+{
+ struct ttm_resource *res;
+
+ spin_lock(&bdev->lru_lock);
+ while ((res = ttm_lru_first_res_or_null(list))) {
+ struct ttm_buffer_object *bo = res->bo;
+
+ /* Take ref against racing releases once lru_lock is unlocked */
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
+
+ list_del_init(&bo->resource->lru.link);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ spin_unlock(&bdev->lru_lock);
+}
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ unsigned int i, j;
+
+ ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable);
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
+ ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
+ }
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5c..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -25,59 +26,18 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- if (!entry->reserved)
- continue;
+#include <linux/export.h>
- entry->reserved = false;
- if (entry->removed) {
- ttm_bo_add_to_lru(bo);
- entry->removed = false;
- }
- ww_mutex_unlock(&bo->resv->lock);
- }
-}
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo.h>
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+ struct ttm_validate_buffer *entry)
{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
+ list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (!entry->reserved)
- continue;
- if (!entry->removed) {
- entry->put_count = ttm_bo_del_from_lru(bo);
- entry->removed = true;
- }
- }
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
-
- if (entry->put_count) {
- ttm_bo_list_ref_sub(bo, entry->put_count, true);
- entry->put_count = 0;
- }
+ dma_resv_unlock(bo->base.resv);
}
}
@@ -85,17 +45,19 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->glob;
- spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- ww_acquire_fini(ticket);
- spin_unlock(&glob->lru_lock);
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
+
+ if (ticket)
+ ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -112,123 +74,90 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list)
+ struct list_head *list, bool intr,
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- list_for_each_entry(entry, list, head) {
- entry->reserved = false;
- entry->put_count = 0;
- entry->removed = false;
- }
-
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->glob;
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
- ww_acquire_init(ticket, &reservation_ww_class);
-retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
-
- /* already slowpath reserved? */
- if (entry->reserved)
+ unsigned int num_fences;
+
+ ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
+ if (ret == -EALREADY && dups) {
+ struct ttm_validate_buffer *safe = entry;
+ entry = list_prev_entry(entry, head);
+ list_del(&safe->head);
+ list_add(&safe->head, dups);
continue;
+ }
+ num_fences = max(entry->num_shared, 1u);
+ if (!ret) {
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
+ if (!ret)
+ continue;
+ }
- ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+ /* uh oh, we lost out, drop every reservation and try
+ * to only reserve this buffer, then start over if
+ * this succeeds.
+ */
+ ttm_eu_backoff_reservation_reverse(list, entry);
if (ret == -EDEADLK) {
- /* uh oh, we lost out, drop every reservation and try
- * to only reserve this buffer, then start over if
- * this succeeds.
- */
- spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
- if (unlikely(ret != 0)) {
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- goto err_fini;
- }
+ ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
+ }
- entry->reserved = true;
- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ret = -EBUSY;
- goto err;
+ if (!ret)
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
+
+ if (unlikely(ret != 0)) {
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
}
- goto retry;
- } else if (ret)
- goto err;
-
- entry->reserved = true;
- if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ret = -EBUSY;
- goto err;
+ return ret;
}
+
+		/* move this item to the front of the list, which
+		 * forces correct iteration of the loop without extra
+		 * bookkeeping
+		 */
+ list_del(&entry->head);
+ list_add(&entry->head, list);
}
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
return 0;
-
-err:
- spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- spin_unlock(&glob->lru_lock);
- ttm_eu_list_ref_sub(list);
-err_fini:
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
- return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, void *sync_obj)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
- driver = bdev->driver;
- glob = bo->glob;
-
- spin_lock(&glob->lru_lock);
- spin_lock(&bdev->fence_lock);
-
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
- entry->old_sync_obj = bo->sync_obj;
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
- ttm_bo_add_to_lru(bo);
- ww_mutex_unlock(&bo->resv->lock);
- entry->reserved = false;
- }
- spin_unlock(&bdev->fence_lock);
- spin_unlock(&glob->lru_lock);
- ww_acquire_fini(ticket);
+ struct ttm_buffer_object *bo = entry->bo;
- list_for_each_entry(entry, list, head) {
- if (entry->old_sync_obj)
- driver->sync_obj_unref(&entry->old_sync_obj);
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
}
+ if (ticket)
+ ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
deleted file mode 100644
index 3daa9a3930b8..000000000000
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ /dev/null
@@ -1,310 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <drm/ttm/ttm_lock.h>
-#include <drm/ttm/ttm_module.h>
-#include <linux/atomic.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
-#define TTM_WRITE_LOCK_PENDING (1 << 0)
-#define TTM_VT_LOCK_PENDING (1 << 1)
-#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
-#define TTM_VT_LOCK (1 << 3)
-#define TTM_SUSPEND_LOCK (1 << 4)
-
-void ttm_lock_init(struct ttm_lock *lock)
-{
- spin_lock_init(&lock->lock);
- init_waitqueue_head(&lock->queue);
- lock->rw = 0;
- lock->flags = 0;
- lock->kill_takers = false;
- lock->signal = SIGKILL;
-}
-EXPORT_SYMBOL(ttm_lock_init);
-
-void ttm_read_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- if (--lock->rw == 0)
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-EXPORT_SYMBOL(ttm_read_unlock);
-
-static bool __ttm_read_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- locked = true;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible)
- ret = wait_event_interruptible(lock->queue,
- __ttm_read_lock(lock));
- else
- wait_event(lock->queue, __ttm_read_lock(lock));
- return ret;
-}
-EXPORT_SYMBOL(ttm_read_lock);
-
-static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
-{
- bool block = true;
-
- *locked = false;
-
- spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- block = false;
- *locked = true;
- } else if (lock->flags == 0) {
- block = false;
- }
- spin_unlock(&lock->lock);
-
- return !block;
-}
-
-int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
- bool locked;
-
- if (interruptible)
- ret = wait_event_interruptible
- (lock->queue, __ttm_read_trylock(lock, &locked));
- else
- wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
-
- if (unlikely(ret != 0)) {
- BUG_ON(locked);
- return ret;
- }
-
- return (locked) ? 0 : -EBUSY;
-}
-
-void ttm_write_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 0;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-EXPORT_SYMBOL(ttm_write_unlock);
-
-static bool __ttm_write_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
- if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
- lock->rw = -1;
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- locked = true;
- } else {
- lock->flags |= TTM_WRITE_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_write_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- }
- } else
- wait_event(lock->queue, __ttm_read_lock(lock));
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_write_lock);
-
-void ttm_write_lock_downgrade(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 1;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static int __ttm_vt_unlock(struct ttm_lock *lock)
-{
- int ret = 0;
-
- spin_lock(&lock->lock);
- if (unlikely(!(lock->flags & TTM_VT_LOCK)))
- ret = -EINVAL;
- lock->flags &= ~TTM_VT_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-
- return ret;
-}
-
-static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
- int ret;
-
- *p_base = NULL;
- ret = __ttm_vt_unlock(lock);
- BUG_ON(ret != 0);
-}
-
-static bool __ttm_vt_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- lock->flags |= TTM_VT_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_VT_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_vt_lock(struct ttm_lock *lock,
- bool interruptible,
- struct ttm_object_file *tfile)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_vt_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- return ret;
- }
- } else
- wait_event(lock->queue, __ttm_vt_lock(lock));
-
- /*
- * Add a base-object, the destructor of which will
- * make sure the lock is released if the client dies
- * while holding it.
- */
-
- ret = ttm_base_object_init(tfile, &lock->base, false,
- ttm_lock_type, &ttm_vt_lock_remove, NULL);
- if (ret)
- (void)__ttm_vt_unlock(lock);
- else
- lock->vt_holder = tfile;
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_vt_lock);
-
-int ttm_vt_unlock(struct ttm_lock *lock)
-{
- return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.hash.key, TTM_REF_USAGE);
-}
-EXPORT_SYMBOL(ttm_vt_unlock);
-
-void ttm_suspend_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_SUSPEND_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-EXPORT_SYMBOL(ttm_suspend_unlock);
-
-static bool __ttm_suspend_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
- lock->flags |= TTM_SUSPEND_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_SUSPEND_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-void ttm_suspend_lock(struct ttm_lock *lock)
-{
- wait_event(lock->queue, __ttm_suspend_lock(lock));
-}
-EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
deleted file mode 100644
index dbc2def887cd..000000000000
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ /dev/null
@@ -1,601 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_zone {
- struct kobject kobj;
- struct ttm_mem_global *glob;
- const char *name;
- uint64_t zone_mem;
- uint64_t emer_mem;
- uint64_t max_mem;
- uint64_t swap_limit;
- uint64_t used_mem;
-};
-
-static struct attribute ttm_mem_sys = {
- .name = "zone_memory",
- .mode = S_IRUGO
-};
-static struct attribute ttm_mem_emer = {
- .name = "emergency_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_max = {
- .name = "available_memory",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_swap = {
- .name = "swap_limit",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_used = {
- .name = "used_memory",
- .mode = S_IRUGO
-};
-
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
-
- pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
- zone->name, (unsigned long long)zone->used_mem >> 10);
- kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- uint64_t val = 0;
-
- spin_lock(&zone->glob->lock);
- if (attr == &ttm_mem_sys)
- val = zone->zone_mem;
- else if (attr == &ttm_mem_emer)
- val = zone->emer_mem;
- else if (attr == &ttm_mem_max)
- val = zone->max_mem;
- else if (attr == &ttm_mem_swap)
- val = zone->swap_limit;
- else if (attr == &ttm_mem_used)
- val = zone->used_mem;
- spin_unlock(&zone->glob->lock);
-
- return snprintf(buffer, PAGE_SIZE, "%llu\n",
- (unsigned long long) val >> 10);
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob);
-
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t size)
-{
- struct ttm_mem_zone *zone =
- container_of(kobj, struct ttm_mem_zone, kobj);
- int chars;
- unsigned long val;
- uint64_t val64;
-
- chars = sscanf(buffer, "%lu", &val);
- if (chars == 0)
- return size;
-
- val64 = val;
- val64 <<= 10;
-
- spin_lock(&zone->glob->lock);
- if (val64 > zone->zone_mem)
- val64 = zone->zone_mem;
- if (attr == &ttm_mem_emer) {
- zone->emer_mem = val64;
- if (zone->max_mem > val64)
- zone->max_mem = val64;
- } else if (attr == &ttm_mem_max) {
- zone->max_mem = val64;
- if (zone->emer_mem < val64)
- zone->emer_mem = val64;
- } else if (attr == &ttm_mem_swap)
- zone->swap_limit = val64;
- spin_unlock(&zone->glob->lock);
-
- ttm_check_swapping(zone->glob);
-
- return size;
-}
-
-static struct attribute *ttm_mem_zone_attrs[] = {
- &ttm_mem_sys,
- &ttm_mem_emer,
- &ttm_mem_max,
- &ttm_mem_swap,
- &ttm_mem_used,
- NULL
-};
-
-static const struct sysfs_ops ttm_mem_zone_ops = {
- .show = &ttm_mem_zone_show,
- .store = &ttm_mem_zone_store
-};
-
-static struct kobj_type ttm_mem_zone_kobj_type = {
- .release = &ttm_mem_zone_kobj_release,
- .sysfs_ops = &ttm_mem_zone_ops,
- .default_attrs = ttm_mem_zone_attrs,
-};
-
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- kfree(glob);
-}
-
-static struct kobj_type ttm_mem_glob_kobj_type = {
- .release = &ttm_mem_global_kobj_release,
-};
-
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
- bool from_wq, uint64_t extra)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
- uint64_t target;
-
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
-
- if (from_wq)
- target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
- target = zone->emer_mem;
- else
- target = zone->max_mem;
-
- target = (extra > target) ? 0ULL : target;
-
- if (zone->used_mem > target)
- return true;
- }
- return false;
-}
-
-/**
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
- * Note that this function is reentrant:
- * many threads may try to swap out at any given time.
- */
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra)
-{
- int ret;
- struct ttm_mem_shrink *shrink;
-
- spin_lock(&glob->lock);
- if (glob->shrink == NULL)
- goto out;
-
- while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- shrink = glob->shrink;
- spin_unlock(&glob->lock);
- ret = shrink->do_shrink(shrink);
- spin_lock(&glob->lock);
- if (unlikely(ret != 0))
- goto out;
- }
-out:
- spin_unlock(&glob->lock);
-}
-
-
-
-static void ttm_shrink_work(struct work_struct *work)
-{
- struct ttm_mem_global *glob =
- container_of(work, struct ttm_mem_global, work);
-
- ttm_shrink(glob, true, 0ULL);
-}
-
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram - si->totalhigh;
- mem *= si->mem_unit;
-
- zone->name = "kernel";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone;
- uint64_t mem;
- int ret;
-
- if (si->totalhigh == 0)
- return 0;
-
- zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- zone->name = "highmem";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_highmem = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- const struct sysinfo *si)
-{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
- uint64_t mem;
- int ret;
-
- if (unlikely(!zone))
- return -ENOMEM;
-
- mem = si->totalram;
- mem *= si->mem_unit;
-
- /**
- * No special dma32 zone needed.
- */
-
- if (mem <= ((uint64_t) 1ULL << 32)) {
- kfree(zone);
- return 0;
- }
-
- /*
- * Limit max dma32 memory to 4GB for now
- * until we can figure out how big this
- * zone really is.
- */
-
- mem = ((uint64_t) 1ULL << 32);
- zone->name = "dma32";
- zone->zone_mem = mem;
- zone->max_mem = mem >> 1;
- zone->emer_mem = (mem >> 1) + (mem >> 2);
- zone->swap_limit = zone->max_mem - (mem >> 3);
- zone->used_mem = 0;
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
- }
- glob->zones[glob->num_zones++] = zone;
- return 0;
-}
-#endif
-
-int ttm_mem_global_init(struct ttm_mem_global *glob)
-{
- struct sysinfo si;
- int ret;
- int i;
- struct ttm_mem_zone *zone;
-
- spin_lock_init(&glob->lock);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
- ret = kobject_init_and_add(
- &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
- if (unlikely(ret != 0)) {
- kobject_put(&glob->kobj);
- return ret;
- }
-
- si_meminfo(&si);
-
- ret = ttm_mem_init_kernel_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
- ret = ttm_mem_init_highmem_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#else
- ret = ttm_mem_init_dma32_zone(glob, &si);
- if (unlikely(ret != 0))
- goto out_no_zone;
-#endif
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
- zone->name, (unsigned long long)zone->max_mem >> 10);
- }
- ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
- ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
- return 0;
-out_no_zone:
- ttm_mem_global_release(glob);
- return ret;
-}
-EXPORT_SYMBOL(ttm_mem_global_init);
-
-void ttm_mem_global_release(struct ttm_mem_global *glob)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- /* let the page allocator first stop the shrink work. */
- ttm_page_alloc_fini();
- ttm_dma_page_alloc_fini();
-
- flush_workqueue(glob->swap_queue);
- destroy_workqueue(glob->swap_queue);
- glob->swap_queue = NULL;
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- kobject_del(&zone->kobj);
- kobject_put(&zone->kobj);
- }
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
-}
-EXPORT_SYMBOL(ttm_mem_global_release);
-
-static void ttm_check_swapping(struct ttm_mem_global *glob)
-{
- bool needs_swapping = false;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (zone->used_mem > zone->swap_limit) {
- needs_swapping = true;
- break;
- }
- }
-
- spin_unlock(&glob->lock);
-
- if (unlikely(needs_swapping))
- (void)queue_work(glob->swap_queue, &glob->work);
-
-}
-
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount)
-{
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem -= amount;
- }
- spin_unlock(&glob->lock);
-}
-
-void ttm_mem_global_free(struct ttm_mem_global *glob,
- uint64_t amount)
-{
- return ttm_mem_global_free_zone(glob, NULL, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
-{
- uint64_t limit;
- int ret = -ENOMEM;
- unsigned int i;
- struct ttm_mem_zone *zone;
-
- spin_lock(&glob->lock);
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
-
- limit = (capable(CAP_SYS_ADMIN)) ?
- zone->emer_mem : zone->max_mem;
-
- if (zone->used_mem > limit)
- goto out_unlock;
- }
-
- if (reserve) {
- for (i = 0; i < glob->num_zones; ++i) {
- zone = glob->zones[i];
- if (single_zone && zone != single_zone)
- continue;
- zone->used_mem += amount;
- }
- }
-
- ret = 0;
-out_unlock:
- spin_unlock(&glob->lock);
- ttm_check_swapping(glob);
-
- return ret;
-}
-
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
- struct ttm_mem_zone *single_zone,
- uint64_t memory,
- bool no_wait, bool interruptible)
-{
- int count = TTM_MEMORY_ALLOC_RETRIES;
-
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
- != 0)) {
- if (no_wait)
- return -ENOMEM;
- if (unlikely(count-- == 0))
- return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16);
- }
-
- return 0;
-}
-
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible)
-{
- /**
- * Normal allocations of kernel memory are registered in
- * all zones.
- */
-
- return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
- interruptible);
-}
-EXPORT_SYMBOL(ttm_mem_global_alloc);
-
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page,
- bool no_wait, bool interruptible)
-{
-
- struct ttm_mem_zone *zone = NULL;
-
- /**
-	 * Page allocations may be registered in a single zone
- * only if highmem or !dma32.
- */
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
- interruptible);
-}
-
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
-{
- struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
- if (PageHighMem(page) && glob->zone_highmem != NULL)
- zone = glob->zone_highmem;
-#else
- if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
- zone = glob->zone_kernel;
-#endif
- ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
-}
-
-
-size_t ttm_round_pot(size_t size)
-{
- if ((size & (size - 1)) == 0)
- return size;
- else if (size > PAGE_SIZE)
- return PAGE_ALIGN(size);
- else {
- size_t tmp_size = 4;
-
- while (tmp_size < size)
- tmp_size <<= 1;
-
- return tmp_size;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..aa137ead5cc5 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -30,73 +31,62 @@
*/
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pgtable.h>
#include <linux/sched.h>
-#include <drm/ttm/ttm_module.h>
+#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
+#include <drm/ttm/ttm_caching.h>
-static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+#include "ttm_module.h"
-static struct device_type ttm_drm_class_type = {
- .name = "ttm",
- /**
- * Add pm ops here.
- */
-};
-
-static void ttm_drm_class_device_release(struct device *dev)
-{
- atomic_set(&device_released, 1);
- wake_up_all(&exit_q);
-}
-
-static struct device ttm_drm_class_device = {
- .type = &ttm_drm_class_type,
- .release = &ttm_drm_class_device_release
-};
-
-struct kobject *ttm_get_kobj(void)
-{
- struct kobject *kobj = &ttm_drm_class_device.kobj;
- BUG_ON(kobj == NULL);
- return kobj;
-}
-
-static int __init ttm_init(void)
-{
- int ret;
-
- ret = dev_set_name(&ttm_drm_class_device, "ttm");
- if (unlikely(ret != 0))
- return ret;
-
- atomic_set(&device_released, 0);
- ret = drm_class_device_register(&ttm_drm_class_device);
- if (unlikely(ret != 0))
- goto out_no_dev_reg;
-
- return 0;
-out_no_dev_reg:
- atomic_set(&device_released, 1);
- wake_up_all(&exit_q);
- return ret;
-}
+/**
+ * DOC: TTM
+ *
+ * TTM is a memory manager for accelerator devices with dedicated memory.
+ *
+ * The basic idea is that resources are grouped together in buffer objects of
+ * certain size and TTM handles lifetime, movement and CPU mappings of those
+ * objects.
+ *
+ * TODO: Add more design background and information here.
+ */
-static void __exit ttm_exit(void)
+/**
+ * ttm_prot_from_caching - Modify the page protection according to the
+ * ttm caching mode
+ * @caching: The ttm caching mode
+ * @tmp: The original page protection
+ *
+ * Return: The modified page protection
+ */
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
{
- drm_class_device_unregister(&ttm_drm_class_device);
+ /* Cached mappings need no adjustment */
+ if (caching == ttm_cached)
+ return tmp;
- /**
- * Refuse to unload until the TTM device is released.
- * Not sure this is 100% needed.
- */
-
- wait_event(exit_q, atomic_read(&device_released) == 1);
+#if defined(__i386__) || defined(__x86_64__)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+#ifndef CONFIG_UML
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+#endif /* CONFIG_UML */
+#endif /* __i386__ || __x86_64__ */
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__) || \
+ defined(__riscv)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
}
-module_init(ttm_init);
-module_exit(ttm_exit);
-
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..767fe22aed48
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.h
@@ -0,0 +1,43 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#define TTM_PFX "[TTM] "
+
+struct dentry;
+struct ttm_device;
+
+extern struct dentry *ttm_debugfs_root;
+
+void ttm_sys_man_init(struct ttm_device *bdev);
+
+#endif /* _TTM_MODULE_H_ */
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
deleted file mode 100644
index 58a5f3261c0b..000000000000
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-/** @file ttm_object.c
- *
- * Base- and reference object implementation for the various
- * ttm objects. Implements reference counting, minimal security checks
- * and release on file close.
- */
-
-/**
- * struct ttm_object_file
- *
- * @tdev: Pointer to the ttm_object_device.
- *
- * @lock: Lock that protects the ref_list list and the
- * ref_hash hash tables.
- *
- * @ref_list: List of ttm_ref_objects to be destroyed at
- * file release.
- *
- * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
- * for fast lookup of ref objects given a base object.
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <drm/ttm/ttm_object.h>
-#include <drm/ttm/ttm_module.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/atomic.h>
-
-struct ttm_object_file {
- struct ttm_object_device *tdev;
- rwlock_t lock;
- struct list_head ref_list;
- struct drm_open_hash ref_hash[TTM_REF_NUM];
- struct kref refcount;
-};
-
-/**
- * struct ttm_object_device
- *
- * @object_lock: lock that protects the object_hash hash table.
- *
- * @object_hash: hash table for fast lookup of object global names.
- *
- * @object_count: Per device object count.
- *
- * This is the per-device data structure needed for ttm object management.
- */
-
-struct ttm_object_device {
- spinlock_t object_lock;
- struct drm_open_hash object_hash;
- atomic_t object_count;
- struct ttm_mem_global *mem_glob;
-};
-
-/**
- * struct ttm_ref_object
- *
- * @hash: Hash entry for the per-file object reference hash.
- *
- * @head: List entry for the per-file list of ref-objects.
- *
- * @kref: Ref count.
- *
- * @obj: Base object this ref object is referencing.
- *
- * @ref_type: Type of ref object.
- *
- * This is similar to an idr object, but it also has a hash table entry
- * that allows lookup with a pointer to the referenced object as a key. In
- * that way, one can easily detect whether a base object is referenced by
- * a particular ttm_object_file. It also carries a ref count to avoid creating
- * multiple ref objects if a ttm_object_file references the same base
- * object more than once.
- */
-
-struct ttm_ref_object {
- struct drm_hash_item hash;
- struct list_head head;
- struct kref kref;
- enum ttm_ref_type ref_type;
- struct ttm_base_object *obj;
- struct ttm_object_file *tfile;
-};
-
-static inline struct ttm_object_file *
-ttm_object_file_ref(struct ttm_object_file *tfile)
-{
- kref_get(&tfile->refcount);
- return tfile;
-}
-
-static void ttm_object_file_destroy(struct kref *kref)
-{
- struct ttm_object_file *tfile =
- container_of(kref, struct ttm_object_file, refcount);
-
- kfree(tfile);
-}
-
-
-static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
-{
- struct ttm_object_file *tfile = *p_tfile;
-
- *p_tfile = NULL;
- kref_put(&tfile->refcount, ttm_object_file_destroy);
-}
-
-
-int ttm_base_object_init(struct ttm_object_file *tfile,
- struct ttm_base_object *base,
- bool shareable,
- enum ttm_object_type object_type,
- void (*refcount_release) (struct ttm_base_object **),
- void (*ref_obj_release) (struct ttm_base_object *,
- enum ttm_ref_type ref_type))
-{
- struct ttm_object_device *tdev = tfile->tdev;
- int ret;
-
- base->shareable = shareable;
- base->tfile = ttm_object_file_ref(tfile);
- base->refcount_release = refcount_release;
- base->ref_obj_release = ref_obj_release;
- base->object_type = object_type;
- kref_init(&base->refcount);
- spin_lock(&tdev->object_lock);
- ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
- spin_unlock(&tdev->object_lock);
- if (unlikely(ret != 0))
- goto out_err0;
-
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0))
- goto out_err1;
-
- ttm_base_object_unref(&base);
-
- return 0;
-out_err1:
- spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
- spin_unlock(&tdev->object_lock);
-out_err0:
- return ret;
-}
-EXPORT_SYMBOL(ttm_base_object_init);
-
-static void ttm_release_base(struct kref *kref)
-{
- struct ttm_base_object *base =
- container_of(kref, struct ttm_base_object, refcount);
- struct ttm_object_device *tdev = base->tfile->tdev;
-
- spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
- spin_unlock(&tdev->object_lock);
-
- /*
- * Note: We don't use synchronize_rcu() here because it's far
- * too slow. It's up to the user to free the object using
- * call_rcu() or ttm_base_object_kfree().
- */
-
- if (base->refcount_release) {
- ttm_object_file_unref(&base->tfile);
- base->refcount_release(&base);
- }
-}
-
-void ttm_base_object_unref(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
-
- *p_base = NULL;
-
- kref_put(&base->refcount, ttm_release_base);
-}
-EXPORT_SYMBOL(ttm_base_object_unref);
-
-struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
- uint32_t key)
-{
- struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *base;
- struct drm_hash_item *hash;
- int ret;
-
- rcu_read_lock();
- ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
-
- if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
- }
- rcu_read_unlock();
-
- if (unlikely(ret != 0))
- return NULL;
-
- if (tfile != base->tfile && !base->shareable) {
- pr_err("Attempted access of non-shareable object\n");
- ttm_base_object_unref(&base);
- return NULL;
- }
-
- return base;
-}
-EXPORT_SYMBOL(ttm_base_object_lookup);
-
-int ttm_ref_object_add(struct ttm_object_file *tfile,
- struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
-{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
- struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
- int ret = -EINVAL;
-
- if (existed != NULL)
- *existed = true;
-
- while (ret == -EINVAL) {
- read_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, base->hash.key, &hash);
-
- if (ret == 0) {
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- kref_get(&ref->kref);
- read_unlock(&tfile->lock);
- break;
- }
-
- read_unlock(&tfile->lock);
- ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- false, false);
- if (unlikely(ret != 0))
- return ret;
- ref = kmalloc(sizeof(*ref), GFP_KERNEL);
- if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
- return -ENOMEM;
- }
-
- ref->hash.key = base->hash.key;
- ref->obj = base;
- ref->tfile = tfile;
- ref->ref_type = ref_type;
- kref_init(&ref->kref);
-
- write_lock(&tfile->lock);
- ret = drm_ht_insert_item(ht, &ref->hash);
-
- if (likely(ret == 0)) {
- list_add_tail(&ref->head, &tfile->ref_list);
- kref_get(&base->refcount);
- write_unlock(&tfile->lock);
- if (existed != NULL)
- *existed = false;
- break;
- }
-
- write_unlock(&tfile->lock);
- BUG_ON(ret != -EINVAL);
-
- ttm_mem_global_free(mem_glob, sizeof(*ref));
- kfree(ref);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_ref_object_add);
-
-static void ttm_ref_object_release(struct kref *kref)
-{
- struct ttm_ref_object *ref =
- container_of(kref, struct ttm_ref_object, kref);
- struct ttm_base_object *base = ref->obj;
- struct ttm_object_file *tfile = ref->tfile;
- struct drm_open_hash *ht;
- struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
-
- ht = &tfile->ref_hash[ref->ref_type];
- (void)drm_ht_remove_item(ht, &ref->hash);
- list_del(&ref->head);
- write_unlock(&tfile->lock);
-
- if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
- base->ref_obj_release(base, ref->ref_type);
-
- ttm_base_object_unref(&ref->obj);
- ttm_mem_global_free(mem_glob, sizeof(*ref));
- kfree(ref);
- write_lock(&tfile->lock);
-}
-
-int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key, enum ttm_ref_type ref_type)
-{
- struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
- struct ttm_ref_object *ref;
- struct drm_hash_item *hash;
- int ret;
-
- write_lock(&tfile->lock);
- ret = drm_ht_find_item(ht, key, &hash);
- if (unlikely(ret != 0)) {
- write_unlock(&tfile->lock);
- return -EINVAL;
- }
- ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- kref_put(&ref->kref, ttm_ref_object_release);
- write_unlock(&tfile->lock);
- return 0;
-}
-EXPORT_SYMBOL(ttm_ref_object_base_unref);
-
-void ttm_object_file_release(struct ttm_object_file **p_tfile)
-{
- struct ttm_ref_object *ref;
- struct list_head *list;
- unsigned int i;
- struct ttm_object_file *tfile = *p_tfile;
-
- *p_tfile = NULL;
- write_lock(&tfile->lock);
-
- /*
- * Since we release the lock within the loop, we have to
- * restart it from the beginning each time.
- */
-
- while (!list_empty(&tfile->ref_list)) {
- list = tfile->ref_list.next;
- ref = list_entry(list, struct ttm_ref_object, head);
- ttm_ref_object_release(&ref->kref);
- }
-
- for (i = 0; i < TTM_REF_NUM; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
-
- write_unlock(&tfile->lock);
- ttm_object_file_unref(&tfile);
-}
-EXPORT_SYMBOL(ttm_object_file_release);
-
-struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
- unsigned int hash_order)
-{
- struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
- unsigned int i;
- unsigned int j = 0;
- int ret;
-
- if (unlikely(tfile == NULL))
- return NULL;
-
- rwlock_init(&tfile->lock);
- tfile->tdev = tdev;
- kref_init(&tfile->refcount);
- INIT_LIST_HEAD(&tfile->ref_list);
-
- for (i = 0; i < TTM_REF_NUM; ++i) {
- ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
- if (ret) {
- j = i;
- goto out_err;
- }
- }
-
- return tfile;
-out_err:
- for (i = 0; i < j; ++i)
- drm_ht_remove(&tfile->ref_hash[i]);
-
- kfree(tfile);
-
- return NULL;
-}
-EXPORT_SYMBOL(ttm_object_file_init);
-
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
- *mem_glob,
- unsigned int hash_order)
-{
- struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
- int ret;
-
- if (unlikely(tdev == NULL))
- return NULL;
-
- tdev->mem_glob = mem_glob;
- spin_lock_init(&tdev->object_lock);
- atomic_set(&tdev->object_count, 0);
- ret = drm_ht_create(&tdev->object_hash, hash_order);
-
- if (likely(ret == 0))
- return tdev;
-
- kfree(tdev);
- return NULL;
-}
-EXPORT_SYMBOL(ttm_object_device_init);
-
-void ttm_object_device_release(struct ttm_object_device **p_tdev)
-{
- struct ttm_object_device *tdev = *p_tdev;
-
- *p_tdev = NULL;
-
- spin_lock(&tdev->object_lock);
- drm_ht_remove(&tdev->object_hash);
- spin_unlock(&tdev->object_lock);
-
- kfree(tdev);
-}
-EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
deleted file mode 100644
index bd2a3b40cd12..000000000000
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ /dev/null
@@ -1,919 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- * Pauli Nieminen <suokkos@gmail.com>
- */
-
-/* simple list based uncached page pool
- * - Pool collects recently freed pages for reuse
- * - Use page->lru to keep a free list
- * - doesn't track currently in use pages
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/atomic.h>
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-
-#ifdef TTM_HAS_AGP
-#include <asm/agp.h>
-#endif
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 16
-#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define PAGE_FREE_INTERVAL 1000
-
-/**
- * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
- *
- * @lock: Protects the shared pool from concurrent access. Must be used with
- * irqsave/irqrestore variants because the pool allocator may be called from
- * delayed work.
- * @fill_lock: Prevent concurrent calls to fill.
- * @list: Pool of free uc/wc pages for fast reuse.
- * @gfp_flags: Flags to pass for alloc_page.
- * @npages: Number of pages in pool.
- */
-struct ttm_page_pool {
- spinlock_t lock;
- bool fill_lock;
- struct list_head list;
- gfp_t gfp_flags;
- unsigned npages;
- char *name;
- unsigned long nfrees;
- unsigned long nrefills;
-};
-
-/**
- * Limits for the pool. They are handled without locks because the only place
- * where they may change is the sysfs store. They won't have immediate effect
- * anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-#define NUM_POOLS 4
-
-/**
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * Manager is read only object for pool code so it doesn't need locking.
- *
- * @free_interval: minimum number of jiffies between freeing pages from pool.
- * @page_alloc_inited: reference counting for pool allocation.
- * @work: Work that is used to shrink the pool. Work is only run when there
- * are some pages to free.
- * @small_allocation: Limit, in number of pages, below which an allocation is
- * considered small.
- *
- * @pools: All pool objects in use.
- **/
-struct ttm_pool_manager {
- struct kobject kobj;
- struct shrinker mm_shrink;
- struct ttm_pool_opts options;
-
- union {
- struct ttm_page_pool pools[NUM_POOLS];
- struct {
- struct ttm_page_pool wc_pool;
- struct ttm_page_pool uc_pool;
- struct ttm_page_pool wc_pool_dma32;
- struct ttm_page_pool uc_pool_dma32;
- } ;
- };
-};
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max)
- m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
- m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-static struct ttm_pool_manager *_manager;
-
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif
-
-/**
- * Select the right pool for the requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
- enum ttm_caching_state cstate)
-{
- int pool_index;
-
- if (cstate == tt_cached)
- return NULL;
-
- if (cstate == tt_wc)
- pool_index = 0x0;
- else
- pool_index = 0x1;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- pool_index |= 0x2;
-
- return &_manager->pools[pool_index];
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages)
-{
- unsigned i;
- if (set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
- for (i = 0; i < npages; ++i)
- __free_page(pages[i]);
-}
-
-static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
- unsigned freed_pages)
-{
- pool->npages -= freed_pages;
- pool->nfrees += freed_pages;
-}
-
-/**
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: pool to free the pages from
- * @nr_free: number of pages to free; FREE_ALL_PAGES frees all pages in the pool
- **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
-{
- unsigned long irq_flags;
- struct page *p;
- struct page **pages_to_free;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
- if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
- return 0;
- }
-
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- list_for_each_entry_reverse(p, &pool->list, lru) {
- if (freed_pages >= npages_to_free)
- break;
-
- pages_to_free[freed_pages++] = p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
- /* remove range of pages from the pool */
- __list_del(p->lru.prev, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_pages_put(pages_to_free, freed_pages);
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
-			/* Not allowed to fall through or break, because the
-			 * code that follows runs inside the spinlock while we
-			 * are outside it here.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- __list_del(&p->lru, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_pages_put(pages_to_free, freed_pages);
-out:
- kfree(pages_to_free);
- return nr_free;
-}
-
-/* Get a good estimate of how many pages are free in the pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
-/**
- * Callback for mm to request that the pool reduce the number of pages held.
- */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static atomic_t start_pool = ATOMIC_INIT(0);
- unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
- struct ttm_page_pool *pool;
- int shrink_pages = sc->nr_to_scan;
-
- pool_offset = pool_offset % NUM_POOLS;
- /* select start pool in round robin fashion */
- for (i = 0; i < NUM_POOLS; ++i) {
- unsigned nr_free = shrink_pages;
- if (shrink_pages == 0)
- break;
- pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free);
- }
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
-}
-
-static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
- manager->mm_shrink.seeks = 1;
- register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-static int ttm_set_pages_caching(struct page **pages,
- enum ttm_caching_state cstate, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- switch (cstate) {
- case tt_uncached:
- r = set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to uc!\n", cpages);
- break;
- case tt_wc:
- r = set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to wc!\n", cpages);
- break;
- default:
- break;
- }
- return r;
-}
-
-/*
- * Free the pages that failed to change the caching state. If there are
- * any pages that have already changed their caching state, put them back
- * in the pool.
- */
-static void ttm_handle_caching_state_failure(struct list_head *pages,
- int ttm_flags, enum ttm_caching_state cstate,
- struct page **failed_pages, unsigned cpages)
-{
- unsigned i;
- /* Failed pages have to be freed */
- for (i = 0; i < cpages; ++i) {
- list_del(&failed_pages[i]->lru);
- __free_page(failed_pages[i]);
- }
-}
-
-/*
- * Allocate new pages with the correct caching.
- *
- * This function is reentrant if the caller updates count depending on the
- * number of pages returned in the pages array.
- */
-static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count)
-{
- struct page **caching_array;
- struct page *p;
- int r = 0;
- unsigned i, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
- /* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
-
- if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
- return -ENOMEM;
- }
-
- for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_page(gfp_flags);
-
- if (!p) {
- pr_err("Unable to get page %u\n", i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- }
- r = -ENOMEM;
- goto out;
- }
-
-#ifdef CONFIG_HIGHMEM
-		/* gfp flags of a highmem page should never be dma32, so
-		 * we should be fine in such a case
-		 */
- if (!PageHighMem(p))
-#endif
- {
- caching_array[cpages++] = p;
- if (cpages == max_cpages) {
-
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r) {
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- goto out;
- }
- cpages = 0;
- }
- }
-
- list_add(&p->lru, pages);
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(caching_array, cstate, cpages);
- if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
-
- return r;
-}
-
-/**
- * Fill the given pool if there aren't enough pages and the requested number of
- * pages is small.
- */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count,
- unsigned long *irq_flags)
-{
- struct page *p;
- int r;
- unsigned cpages = 0;
-	/*
-	 * Only allow one pool fill operation at a time.
-	 * If the pool doesn't have enough pages for the allocation, new pages
-	 * are allocated from outside the pool.
-	 */
- if (pool->fill_lock)
- return;
-
- pool->fill_lock = true;
-
-	/* If the allocation request is small and there are not enough
-	 * pages in the pool, fill the pool up first. */
- if (count < _manager->options.small
- && count > pool->npages) {
- struct list_head new_pages;
- unsigned alloc_size = _manager->options.alloc_size;
-
-		/*
-		 * Can't change page caching while in an irqsave context, so
-		 * we have to drop the pool->lock.
-		 */
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
- INIT_LIST_HEAD(&new_pages);
- r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size);
- spin_lock_irqsave(&pool->lock, *irq_flags);
-
- if (!r) {
- list_splice(&new_pages, &pool->list);
- ++pool->nrefills;
- pool->npages += alloc_size;
- } else {
- pr_err("Failed to fill pool (%p)\n", pool);
-			/* If we have any pages left, put them in the pool. */
- list_for_each_entry(p, &pool->list, lru) {
- ++cpages;
- }
- list_splice(&new_pages, &pool->list);
- pool->npages += cpages;
- }
-
- }
- pool->fill_lock = false;
-}
-
-/*
- * Cut 'count' pages from the pool and put them on the return list.
- *
- * Returns the count of pages still required to fulfill the request.
- */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
-{
- unsigned long irq_flags;
- struct list_head *p;
- unsigned i;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
-
- if (count >= pool->npages) {
- /* take all pages from the pool */
- list_splice_init(&pool->list, pages);
- count -= pool->npages;
- pool->npages = 0;
- goto out;
- }
-	/* Find the cut point for the requested number of pages. Walk the
-	 * list from whichever end is closer to halve the search space. */
- if (count <= pool->npages/2) {
- i = 0;
- list_for_each(p, &pool->list) {
- if (++i == count)
- break;
- }
- } else {
- i = pool->npages + 1;
- list_for_each_prev(p, &pool->list) {
- if (--i == count)
- break;
- }
- }
- /* Cut 'count' number of pages from the pool */
- list_cut_position(pages, &pool->list, p);
- pool->npages -= count;
- count = 0;
-out:
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- return count;
-}
-
-/* Put all pages in the pages list into the correct pool to await reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- unsigned i;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
- }
- }
- return;
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- list_add_tail(&pages[i]->lru, &pool->list);
- pages[i] = NULL;
- pool->npages++;
- }
- }
- /* Check that we don't go over the pool limit */
- npages = 0;
- if (pool->npages > _manager->options.max_size) {
- npages = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (npages)
- ttm_page_pool_free(pool, npages);
-}
-
-/*
- * On success the pages list will hold count correctly
- * cached pages.
- */
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct list_head plist;
- struct page *p = NULL;
- gfp_t gfp_flags = GFP_USER;
- unsigned count;
- int r;
-
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- /* No pool for cached pages */
- if (pool == NULL) {
- if (flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= GFP_DMA32;
- else
- gfp_flags |= GFP_HIGHUSER;
-
- for (r = 0; r < npages; ++r) {
- p = alloc_page(gfp_flags);
- if (!p) {
-
- pr_err("Unable to allocate page\n");
- return -ENOMEM;
- }
-
- pages[r] = p;
- }
- return 0;
- }
-
- /* combine zero flag to pool flags */
- gfp_flags |= pool->gfp_flags;
-
- /* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
- count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
-
- /* clear the pages coming from the pool if requested */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, &plist, lru) {
- if (PageHighMem(p))
- clear_highpage(p);
- else
- clear_page(page_address(p));
- }
- }
-
-	/* If the pool didn't have enough pages, allocate new ones. */
- if (npages > 0) {
-		/* ttm_alloc_new_pages doesn't reference the pool, so we can
-		 * run multiple requests in parallel.
-		 */
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- if (r) {
-			/* If there are any pages in the list, put them back
-			 * in the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
- }
-
- return 0;
-}
-
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
- char *name)
-{
- spin_lock_init(&pool->lock);
- pool->fill_lock = false;
- INIT_LIST_HEAD(&pool->list);
- pool->npages = pool->nfrees = 0;
- pool->gfp_flags = flags;
- pool->name = name;
-}
-
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret;
-
- WARN_ON(_manager);
-
- pr_info("Initializing pool allocator\n");
-
-	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
-	if (!_manager)
-		return -ENOMEM;
-
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
-
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
-
- ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma");
-
- ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma");
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "pool");
- if (unlikely(ret != 0)) {
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
- }
-
- ttm_pool_mm_shrink_init(_manager);
-
- return 0;
-}
-
-void ttm_page_alloc_fini(void)
-{
- int i;
-
- pr_info("Finalizing pool allocator\n");
- ttm_pool_mm_shrink_fini(_manager);
-
- for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
-
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-int ttm_pool_populate(struct ttm_tt *ttm)
-{
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- unsigned i;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return ret;
- }
- }
-
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
- }
- ttm->state = tt_unpopulated;
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct ttm_page_pool *p;
- unsigned i;
- char *h[] = {"pool", "refills", "pages freed", "size"};
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, "%6s %12s %13s %8s\n",
- h[0], h[1], h[2], h[3]);
- for (i = 0; i < NUM_POOLS; ++i) {
- p = &_manager->pools[i];
-
- seq_printf(m, "%6s %12ld %13ld %8d\n",
- p->name, p->nrefills,
- p->nfrees, p->npages);
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_page_alloc_debugfs);
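
The opening of ttm_get_pool() sits earlier in this diff; for reference, the
selection removed above amounts to picking one of four pools by caching state
and DMA32 restriction. A reconstructed sketch, with the index mapping inferred
from the pool initialization order in ttm_page_alloc_init() and the NULL case
from the "no pool for this memory type" handling in ttm_get_pages():

    static struct ttm_page_pool *ttm_get_pool(int flags,
                                              enum ttm_caching_state cstate)
    {
            int pool_index;

            if (cstate == tt_cached)
                    return NULL;       /* cached pages bypass the pools */

            if (cstate == tt_wc)
                    pool_index = 0x0;  /* wc_pool */
            else
                    pool_index = 0x1;  /* uc_pool */

            if (flags & TTM_PAGE_FLAG_DMA32)
                    pool_index |= 0x2; /* wc_pool_dma32 / uc_pool_dma32 */

            return &_manager->pools[pool_index];
    }
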
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
deleted file mode 100644
index b8b394319b45..000000000000
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ /dev/null
@@ -1,1131 +0,0 @@
-/*
- * Copyright 2011 (c) Oracle Corp.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-/*
- * A simple DMA pool loosely based on dmapool.c. It has certain advantages
- * over the DMA pools:
- * - Pool collects recently freed pages for reuse (and hooks up to
- * the shrinker).
- * - Tracks currently in use pages
- * - Tracks whether the page is UC, WB or cached (and reverts to WB
- * when freed).
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
-#include <asm/agp.h>
-#endif
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 4
-#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define IS_UNDEFINED (0)
-#define IS_WC (1<<1)
-#define IS_UC (1<<2)
-#define IS_CACHED (1<<3)
-#define IS_DMA32 (1<<4)
-
-enum pool_type {
- POOL_IS_UNDEFINED,
- POOL_IS_WC = IS_WC,
- POOL_IS_UC = IS_UC,
- POOL_IS_CACHED = IS_CACHED,
- POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
- POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
- POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
-};
-/*
- * The pool structure. There are usually six pools:
- * - generic (not restricted to DMA32):
- * - write combined, uncached, cached.
- * - dma32 (up to 2^32 - so up 4GB):
- * - write combined, uncached, cached.
- * for each 'struct device'. The 'cached' is for pages that are actively used.
- * The other ones can be shrunk by the shrinker API if necessary.
- * @pools: The 'struct device->dma_pools' link.
- * @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrent access. Must be
- * used with irqsave/irqrestore variants because the pool allocator may be
- * called from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is very important and
- * it is in the order that the TTM pages that are put back are in.
- * @free_list: Pool of pages that are free to be used. No order requirements.
- * @dev: The device that is associated with these pools.
- * @size: Size used during DMA allocation.
- * @npages_free: Count of available pages for re-use.
- * @npages_in_use: Count of pages that are in use.
- * @nfrees: Stats when pool is shrinking.
- * @nrefills: Stats when the pool is grown.
- * @gfp_flags: Flags to pass for alloc_page.
- * @name: Name of the pool.
- * @dev_name: Name derived from dev - similar to how dev_info works.
- *   Used during shutdown because dev_info is unavailable during release.
- */
-struct dma_pool {
-	struct list_head pools; /* The 'struct device->dma_pools' link */
- enum pool_type type;
- spinlock_t lock;
- struct list_head inuse_list;
- struct list_head free_list;
- struct device *dev;
- unsigned size;
- unsigned npages_free;
- unsigned npages_in_use;
- unsigned long nfrees; /* Stats when shrunk. */
- unsigned long nrefills; /* Stats when grown. */
- gfp_t gfp_flags;
- char name[13]; /* "cached dma32" */
- char dev_name[64]; /* Constructed from dev */
-};
-
-/*
- * The accounting structure keeping track of the allocated page along with
- * the DMA address.
- * @page_list: The link into the owning list (a pool's free_list/inuse_list
- *   or a ttm_dma_tt's pages_list).
- * @vaddr: The virtual address of the page
- * @p: The 'struct page' backing @vaddr
- * @dma: The bus address of the page. If the page is not allocated
- *   via the DMA API, it will be -1.
- */
-struct dma_page {
- struct list_head page_list;
- void *vaddr;
- struct page *p;
- dma_addr_t dma;
-};
-
-/*
- * Limits for the pool. They are handled without locks because the only place
- * they may change is the sysfs store. They won't have an immediate effect
- * anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-/*
- * Contains the list of all of the 'struct device' and their corresponding
- * DMA pools. Guarded by _manager->lock.
- * @pools: The link to 'struct ttm_pool_manager->pools'
- * @dev: The 'struct device' associated with the 'pool'
- * @pool: The 'struct dma_pool' associated with the 'dev'
- */
-struct device_pools {
- struct list_head pools;
- struct device *dev;
- struct dma_pool *pool;
-};
-
-/*
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * @lock: Lock used when adding/removing from pools
- * @pools: List of 'struct device' and 'struct dma_pool' tuples.
- * @options: Limits for the pool.
- * @npools: Total number of pools in existence.
- * @mm_shrink: The structure used by register_shrinker()/unregister_shrinker()
- */
-struct ttm_pool_manager {
- struct mutex lock;
- struct list_head pools;
- struct ttm_pool_opts options;
- unsigned npools;
- struct shrinker mm_shrink;
- struct kobject kobj;
-};
-
-static struct ttm_pool_manager *_manager;
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max)
- m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
- m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
-static int ttm_set_pages_caching(struct dma_pool *pool,
- struct page **pages, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to uc!\n",
- pool->dev_name, cpages);
- }
- if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to wc!\n",
- pool->dev_name, cpages);
- }
- return r;
-}
-
-static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
-{
- dma_addr_t dma = d_page->dma;
- dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
-
- kfree(d_page);
- d_page = NULL;
-}
-static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
-{
- struct dma_page *d_page;
-
- d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
- if (!d_page)
- return NULL;
-
- d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
- &d_page->dma,
- pool->gfp_flags);
- if (d_page->vaddr)
- d_page->p = virt_to_page(d_page->vaddr);
- else {
- kfree(d_page);
- d_page = NULL;
- }
- return d_page;
-}
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
-{
- enum pool_type type = IS_UNDEFINED;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- type |= IS_DMA32;
- if (cstate == tt_cached)
- type |= IS_CACHED;
- else if (cstate == tt_uncached)
- type |= IS_UC;
- else
- type |= IS_WC;
-
- return type;
-}
-
-static void ttm_pool_update_free_locked(struct dma_pool *pool,
- unsigned freed_pages)
-{
- pool->npages_free -= freed_pages;
- pool->nfrees += freed_pages;
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
- struct page *pages[], unsigned npages)
-{
- struct dma_page *d_page, *tmp;
-
- /* Don't set WB on WB page pool. */
- if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, npages);
-
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- }
-}
-
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
-/*
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: Pool to free the pages from
- * @nr_free: If set to FREE_ALL_PAGES, all pages in the pool are freed
- */
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
-{
- unsigned long irq_flags;
- struct dma_page *dma_p, *tmp;
- struct page **pages_to_free;
- struct list_head d_pages;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-#if 0
- if (nr_free > 1) {
- pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
- pool->dev_name, pool->name, current->pid,
- npages_to_free, nr_free);
- }
-#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
-
- if (!pages_to_free) {
- pr_err("%s: Failed to allocate memory for pool free operation\n",
- pool->dev_name);
- return 0;
- }
- INIT_LIST_HEAD(&d_pages);
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
-	/* We pick the oldest ones off the list */
- list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
- page_list) {
- if (freed_pages >= npages_to_free)
- break;
-
- /* Move the dma_page from one list to another. */
- list_move(&dma_p->page_list, &d_pages);
-
- pages_to_free[freed_pages++] = dma_p->p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
- ttm_pool_update_free_locked(pool, freed_pages);
-			/* Because changing page caching is costly
-			 * we unlock the pool to prevent stalling.
-			 */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_dma_pages_put(pool, &d_pages, pages_to_free,
- freed_pages);
-
- INIT_LIST_HEAD(&d_pages);
-
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
-			/* still more pages to free, so restart the processing */
- if (nr_free)
- goto restart;
-
-			/* Not allowed to fall through or break because the
-			 * following context is inside the spinlock while we
-			 * are outside it here.
-			 */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
-out:
- kfree(pages_to_free);
- return nr_free;
-}
-
-static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
-{
- struct device_pools *p;
- struct dma_pool *pool;
-
- if (!dev)
- return;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry_reverse(p, &_manager->pools, pools) {
- if (p->dev != dev)
- continue;
- pool = p->pool;
- if (pool->type != type)
- continue;
-
- list_del(&p->pools);
- kfree(p);
- _manager->npools--;
- break;
- }
- list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
-		/* Takes a spinlock. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
- WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
-		/* This code path is called after _all_ references to the
-		 * struct device have been dropped - so nobody should be
-		 * touching it. In case somebody is trying to _add_, we are
-		 * guarded by the mutex. */
- list_del(&pool->pools);
- kfree(pool);
- break;
- }
- mutex_unlock(&_manager->lock);
-}
-
-/*
- * When the 'struct device' is freed this destructor is run,
- * although the pool might have already been freed earlier.
- */
-static void ttm_dma_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- if (pool)
- ttm_dma_free_pool(dev, pool->type);
-}
-
-static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
- enum pool_type type)
-{
- char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
- struct device_pools *sec_pool = NULL;
- struct dma_pool *pool = NULL, **ptr;
- unsigned i;
- int ret = -ENODEV;
- char *p;
-
- if (!dev)
- return NULL;
-
- ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- ret = -ENOMEM;
-
- pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
- dev_to_node(dev));
- if (!pool)
- goto err_mem;
-
- sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
- dev_to_node(dev));
- if (!sec_pool)
- goto err_mem;
-
- INIT_LIST_HEAD(&sec_pool->pools);
- sec_pool->dev = dev;
- sec_pool->pool = pool;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->inuse_list);
- INIT_LIST_HEAD(&pool->pools);
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->npages_free = pool->npages_in_use = 0;
- pool->nfrees = 0;
- pool->gfp_flags = flags;
- pool->size = PAGE_SIZE;
- pool->type = type;
- pool->nrefills = 0;
- p = pool->name;
- for (i = 0; i < 5; i++) {
- if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
- "%s", n[i]);
- }
- }
- *p = 0;
-	/* We copy the name for pr_ calls because by the time dma_pool_destroy
-	 * is called the kobj->name has already been deallocated. */
- snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
- dev_driver_string(dev), dev_name(dev));
- mutex_lock(&_manager->lock);
- /* You can get the dma_pool from either the global: */
- list_add(&sec_pool->pools, &_manager->pools);
- _manager->npools++;
- /* or from 'struct device': */
- list_add(&pool->pools, &dev->dma_pools);
- mutex_unlock(&_manager->lock);
-
- *ptr = pool;
- devres_add(dev, ptr);
-
- return pool;
-err_mem:
- devres_free(ptr);
- kfree(sec_pool);
- kfree(pool);
- return ERR_PTR(ret);
-}
-
-static struct dma_pool *ttm_dma_find_pool(struct device *dev,
- enum pool_type type)
-{
- struct dma_pool *pool, *tmp, *found = NULL;
-
- if (type == IS_UNDEFINED)
- return found;
-
- /* NB: We iterate on the 'struct dev' which has no spinlock, but
- * it does have a kref which we have taken. The kref is taken during
- * graphic driver loading - in the drm_pci_init it calls either
- * pci_dev_get or pci_register_driver which both end up taking a kref
- * on 'struct device'.
- *
- * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
- * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
- * thing is at that point of time there are no pages associated with the
- * driver so this function will not be called.
- */
- list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- found = pool;
- break;
- }
- return found;
-}
-
-/*
- * Free the pages that failed to change the caching state. If there
- * are pages that have already changed their caching state, put them
- * back in the pool.
- */
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
- struct list_head *d_pages,
- struct page **failed_pages,
- unsigned cpages)
-{
- struct dma_page *d_page, *tmp;
- struct page *p;
- unsigned i = 0;
-
- p = failed_pages[0];
- if (!p)
- return;
- /* Find the failed page. */
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- if (d_page->p != p)
- continue;
- /* .. and then progress over the full list. */
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- if (++i < cpages)
- p = failed_pages[i];
- else
- break;
- }
-
-}
-
-/*
- * Allocate 'count' pages with the pool's caching applied and link them
- * onto the 'd_pages' list.
- * We return zero for success, and negative numbers as errors.
- */
-static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
- struct list_head *d_pages,
- unsigned count)
-{
- struct page **caching_array;
- struct dma_page *dma_p;
- struct page *p;
- int r = 0;
- unsigned i, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
- /* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
-
- if (!caching_array) {
- pr_err("%s: Unable to allocate table for new pages\n",
- pool->dev_name);
- return -ENOMEM;
- }
-
- if (count > 1) {
- pr_debug("%s: (%s:%d) Getting %d pages\n",
- pool->dev_name, pool->name, current->pid, count);
- }
-
- for (i = 0, cpages = 0; i < count; ++i) {
- dma_p = __ttm_dma_alloc_page(pool);
- if (!dma_p) {
- pr_err("%s: Unable to get page %u\n",
- pool->dev_name, i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- }
- r = -ENOMEM;
- goto out;
- }
- p = dma_p->p;
-#ifdef CONFIG_HIGHMEM
-		/* gfp flags of a highmem page should never be dma32, so
-		 * we should be fine in such a case
-		 */
- if (!PageHighMem(p))
-#endif
- {
- caching_array[cpages++] = p;
- if (cpages == max_cpages) {
- /* Note: Cannot hold the spinlock */
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r) {
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- list_add(&dma_p->page_list, d_pages);
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array, cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(pool, d_pages,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
- return r;
-}
-
-/*
- * Returns a lower bound on the number of pages available in the pool's
- * free list; zero means the pool is empty and could not be filled.
- */
-static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
- unsigned long *irq_flags)
-{
- unsigned count = _manager->options.small;
- int r = pool->npages_free;
-
- if (count > pool->npages_free) {
- struct list_head d_pages;
-
- INIT_LIST_HEAD(&d_pages);
-
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
-		/* Returns zero on success, or a negative error code if
-		 * allocating the new pages failed. */
- r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
-
- spin_lock_irqsave(&pool->lock, *irq_flags);
- if (!r) {
- /* Add the fresh to the end.. */
- list_splice(&d_pages, &pool->free_list);
- ++pool->nrefills;
- pool->npages_free += count;
- r = count;
- } else {
- struct dma_page *d_page;
- unsigned cpages = 0;
-
- pr_err("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
-
- list_for_each_entry(d_page, &d_pages, page_list) {
- cpages++;
- }
- list_splice_tail(&d_pages, &pool->free_list);
- pool->npages_free += cpages;
- r = cpages;
- }
- }
- return r;
-}
-
-/*
- * Returns zero on success, or -ENOMEM if no page could be taken.
- * The populate list is actually a stack (not that it matters, as TTM
- * allocates one page at a time).
- */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_dma_tt *ttm_dma,
- unsigned index)
-{
- struct dma_page *d_page;
- struct ttm_tt *ttm = &ttm_dma->ttm;
- unsigned long irq_flags;
- int count, r = -ENOMEM;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
- if (count) {
- d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
- ttm->pages[index] = d_page->p;
- ttm_dma->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
- r = 0;
- pool->npages_in_use += 1;
- pool->npages_free -= 1;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- return r;
-}
-
-/*
- * On success the pages list will hold count correctly cached pages.
- * On failure the negative return value (-ENOMEM, etc.) is returned.
- */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- struct dma_pool *pool;
- enum pool_type type;
- unsigned i;
- gfp_t gfp_flags;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool) {
- pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool)) {
- return -ENOMEM;
- }
- }
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (ret != 0) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return ret;
- }
- }
-
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_populate);
-
-/* Get a good estimate of how many pages are free in the pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
-/* Put all pages in the pages list into the correct pool to await reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- struct dma_pool *pool;
- struct dma_page *d_page, *next;
- enum pool_type type;
- bool is_cached = false;
- unsigned count = 0, i, npages = 0;
- unsigned long irq_flags;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
- pool = ttm_dma_find_pool(dev, type);
- if (!pool)
- return;
-
- is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, tt_cached)) == pool);
-
- /* make sure pages array match list and count number of pages */
- list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
- ttm->pages[count] = d_page->p;
- count++;
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- if (is_cached) {
- pool->nfrees += count;
- } else {
- pool->npages_free += count;
- list_splice(&ttm_dma->pages_list, &pool->free_list);
- npages = count;
- if (pool->npages_free > _manager->options.max_size) {
- npages = pool->npages_free - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (is_cached) {
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p);
- ttm_dma_page_put(pool, d_page);
- }
- } else {
- for (i = 0; i < count; i++) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- }
- }
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; i++) {
- ttm->pages[i] = NULL;
- ttm_dma->dma_address[i] = 0;
- }
-
-	/* shrink pool if necessary (only on !is_cached pools) */
- if (npages)
- ttm_dma_page_pool_free(pool, npages);
- ttm->state = tt_unpopulated;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
-
-/*
- * Callback for the MM to request the pool to reduce the number of pages held.
- */
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static atomic_t start_pool = ATOMIC_INIT(0);
- unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
- unsigned shrink_pages = sc->nr_to_scan;
- struct device_pools *p;
-
- if (list_empty(&_manager->pools))
- return 0;
-
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
- list_for_each_entry(p, &_manager->pools, pools) {
- unsigned nr_free;
-
- if (!p->dev)
- continue;
- if (shrink_pages == 0)
- break;
- /* Do it in round-robin fashion. */
- if (++idx < pool_offset)
- continue;
- nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
- p->pool->dev_name, p->pool->name, current->pid,
- nr_free, shrink_pages);
- }
- mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
-}
-
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
- manager->mm_shrink.seeks = 1;
- register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret = -ENOMEM;
-
- WARN_ON(_manager);
-
- pr_info("Initializing DMA pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- goto err;
-
- mutex_init(&_manager->lock);
- INIT_LIST_HEAD(&_manager->pools);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- /* This takes care of auto-freeing the _manager */
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "dma_pool");
- if (unlikely(ret != 0)) {
- kobject_put(&_manager->kobj);
- goto err;
- }
- ttm_dma_pool_mm_shrink_init(_manager);
- return 0;
-err:
- return ret;
-}
-
-void ttm_dma_page_alloc_fini(void)
-{
- struct device_pools *p, *t;
-
- pr_info("Finalizing DMA pool allocator\n");
- ttm_dma_pool_mm_shrink_fini(_manager);
-
- list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
- dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
- current->pid);
- WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
- ttm_dma_pool_match, p->pool));
- ttm_dma_free_pool(p->dev, p->pool->type);
- }
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct device_pools *p;
- struct dma_pool *pool = NULL;
- char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
- "name", "virt", "busaddr"};
-
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
- h[0], h[1], h[2], h[3], h[4], h[5]);
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools) {
- struct device *dev = p->dev;
- if (!dev)
- continue;
- pool = p->pool;
- seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
- pool->name, pool->nrefills,
- pool->nfrees, pool->npages_in_use,
- pool->npages_free,
- pool->dev_name);
- }
- mutex_unlock(&_manager->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
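
Both entry points of the removed DMA allocator were called in matched pairs
from a driver's ttm_tt backend. A minimal sketch of that wiring; the my_*
names are hypothetical, only ttm_dma_populate()/ttm_dma_unpopulate() and the
struct ttm_dma_tt layout come from the code above:

    struct my_tt {
            struct ttm_dma_tt dma_tt;  /* embeds struct ttm_tt as dma_tt.ttm */
            struct device *dev;        /* device owning the DMA pools */
    };

    static int my_tt_populate(struct ttm_tt *ttm)
    {
            struct my_tt *tt = container_of(ttm, struct my_tt, dma_tt.ttm);

            /* Fills ttm->pages[] and dma_tt.dma_address[] from the pools. */
            return ttm_dma_populate(&tt->dma_tt, tt->dev);
    }

    static void my_tt_unpopulate(struct ttm_tt *ttm)
    {
            struct my_tt *tt = container_of(ttm, struct my_tt, dma_tt.ttm);

            /* Returns the pages to the matching pool (cached pages are freed). */
            ttm_dma_unpopulate(&tt->dma_tt, tt->dev);
    }
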
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
new file mode 100644
index 000000000000..18b6db015619
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
+ * invalidate for those addresses.
+ *
+ * In addition to that, allocations from the DMA coherent API are pooled as
+ * well because they are rather slow compared to alloc_pages+map.
+ */
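+
+/* A minimal usage sketch, assuming the ttm_pool.h entry points implemented
+ * further down in this file (ttm_pool_init(), ttm_pool_alloc(),
+ * ttm_pool_free() and ttm_pool_fini()); dev, tt and ctx are illustrative:
+ *
+ *	struct ttm_pool pool;
+ *
+ *	ttm_pool_init(&pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
+ *	ret = ttm_pool_alloc(&pool, tt, &ctx);
+ *	...
+ *	ttm_pool_free(&pool, tt);
+ *	ttm_pool_fini(&pool);
+ */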
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/highmem.h>
+#include <linux/sched/mm.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_bo.h>
+
+#include "ttm_module.h"
+#include "ttm_pool_internal.h"
+
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h>
+static DECLARE_FAULT_ATTR(backup_fault_inject);
+#else
+#define should_fail(...) false
+#endif
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the allocation
+ *	order stored in the lower bits
+ */
+struct ttm_pool_dma {
+ dma_addr_t addr;
+ unsigned long vaddr;
+};
+
+/**
+ * struct ttm_pool_alloc_state - Current state of the tt page allocation process
+ * @pages: Pointer to the next tt page pointer to populate.
+ * @caching_divide: Pointer to the first page pointer whose page has a staged but
+ * not committed caching transition from write-back to @tt_caching.
+ * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
+ * @remaining_pages: Remaining pages to populate.
+ * @tt_caching: The requested cpu-caching for the pages allocated.
+ */
+struct ttm_pool_alloc_state {
+ struct page **pages;
+ struct page **caching_divide;
+ dma_addr_t *dma_addr;
+ pgoff_t remaining_pages;
+ enum ttm_caching tt_caching;
+};
+
+/**
+ * struct ttm_pool_tt_restore - State representing restore from backup
+ * @pool: The pool used for page allocation while restoring.
+ * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
+ * @alloced_page: Pointer to the page most recently allocated from a pool or
+ * the system.
+ * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
+ * is requested.
+ * @alloced_pages: The number of allocated pages present in the struct ttm_tt
+ * page vector from this restore session.
+ * @restored_pages: The number of 4K pages restored for @alloced_page (which
+ * is typically a multi-order page).
+ * @page_caching: The caching requested by the struct ttm_tt
+ * @order: The order of @alloced_page.
+ *
+ * Recovery from backup might fail when we've recovered less than the
+ * full ttm_tt. In order not to lose any data (yet), keep information
+ * around that allows us to restart a failed ttm backup recovery.
+ */
+struct ttm_pool_tt_restore {
+ struct ttm_pool *pool;
+ struct ttm_pool_alloc_state snapshot_alloc;
+ struct page *alloced_page;
+ dma_addr_t first_dma;
+ pgoff_t alloced_pages;
+ pgoff_t restored_pages;
+ enum ttm_caching page_caching;
+ unsigned int order;
+};
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
+
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker *mm_shrinker;
+static DECLARE_RWSEM(pool_shrink_rwsem);
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+ unsigned int order)
+{
+ const unsigned int beneficial_order = ttm_pool_beneficial_order(pool);
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ struct page *p;
+ void *vaddr;
+
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+	 * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_THISNODE;
+
+ /*
+	 * Do not add latency to the allocation path for allocation orders
+	 * the device tells us bring no additional performance gains.
+ */
+ if (beneficial_order && order > beneficial_order)
+ gfp_flags &= ~__GFP_DIRECT_RECLAIM;
+
+ if (!ttm_pool_uses_dma_alloc(pool)) {
+ p = alloc_pages_node(pool->nid, gfp_flags, order);
+ if (p)
+ p->private = order;
+ return p;
+ }
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+ &dma->addr, gfp_flags, attr);
+ if (!vaddr)
+ goto error_free;
+
+ /* TODO: This is an illegal abuse of the DMA API, but we need to rework
+ * TTM page fault handling and extend the DMA API to clean this up.
+ */
+ if (is_vmalloc_addr(vaddr))
+ p = vmalloc_to_page(vaddr);
+ else
+ p = virt_to_page(vaddr);
+
+ dma->vaddr = (unsigned long)vaddr | order;
+ p->private = (unsigned long)dma;
+ return p;
+
+error_free:
+ kfree(dma);
+ return NULL;
+}
+
+/* Reset the caching of, and free, the pages of size 1 << order */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+ unsigned int order, struct page *p)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ void *vaddr;
+
+#ifdef CONFIG_X86
+ /* We don't care that set_pages_wb is inefficient here. This is only
+ * used when we have to shrink and CPU overhead is irrelevant then.
+ */
+ if (caching != ttm_cached && !PageHighMem(p))
+ set_pages_wb(p, 1 << order);
+#endif
+
+ if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
+ __free_pages(p, order);
+ return;
+ }
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ dma = (void *)p->private;
+ vaddr = (void *)(dma->vaddr & PAGE_MASK);
+ dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+ attr);
+ kfree(dma);
+}
+
+/* Apply any cpu-caching deferred during page allocation */
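+/* Pages are allocated write-back and batched between caching_divide and
+ * pages, so a whole run is converted with a single set_pages_array_wc() or
+ * set_pages_array_uc() call instead of paying the cross-CPU TLB invalidate
+ * once per page.
+ */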
+static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
+{
+#ifdef CONFIG_X86
+ unsigned int num_pages = alloc->pages - alloc->caching_divide;
+
+ if (!num_pages)
+ return 0;
+
+ switch (alloc->tt_caching) {
+ case ttm_cached:
+ break;
+ case ttm_write_combined:
+ return set_pages_array_wc(alloc->caching_divide, num_pages);
+ case ttm_uncached:
+ return set_pages_array_uc(alloc->caching_divide, num_pages);
+ }
+#endif
+ alloc->caching_divide = alloc->pages;
+ return 0;
+}
+
+/* DMA Map pages of 1 << order size and return the resulting dma_address. */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t *dma_addr)
+{
+ dma_addr_t addr;
+
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ addr = dma->addr;
+ } else {
+ size_t size = (1ULL << order) * PAGE_SIZE;
+
+ addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pool->dev, addr))
+ return -EFAULT;
+ }
+
+ *dma_addr = addr;
+
+ return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+ unsigned int num_pages)
+{
+ /* Unmapped while freeing the page */
+ if (ttm_pool_uses_dma_alloc(pool))
+ return;
+
+ dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+ DMA_BIDIRECTIONAL);
+}
+
+/* Give pages back to a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+ unsigned int i, num_pages = 1 << pt->order;
+
+ for (i = 0; i < num_pages; ++i) {
+ if (PageHighMem(p))
+ clear_highpage(p + i);
+ else
+ clear_page(page_address(p + i));
+ }
+
+ spin_lock(&pt->lock);
+ list_add(&p->lru, &pt->pages);
+ spin_unlock(&pt->lock);
+ atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+ if (p) {
+ atomic_long_sub(1 << pt->order, &allocated_pages);
+ list_del(&p->lru);
+ }
+ spin_unlock(&pt->lock);
+
+ return p;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ enum ttm_caching caching, unsigned int order)
+{
+ pt->pool = pool;
+ pt->caching = caching;
+ pt->order = order;
+ spin_lock_init(&pt->lock);
+ INIT_LIST_HEAD(&pt->pages);
+
+ spin_lock(&shrinker_lock);
+ list_add_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&shrinker_lock);
+ list_del(&pt->shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ while ((p = ttm_pool_type_take(pt)))
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
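+/* For example: a pool using dma_alloc always gets its own pool_types; on x86
+ * a write-combined or uncached request without a NUMA node constraint falls
+ * through to the matching global (or global dma32) pool_type; everything
+ * else - notably ttm_cached - returns NULL, and such pages bypass pooling.
+ */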
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+ enum ttm_caching caching,
+ unsigned int order)
+{
+ if (ttm_pool_uses_dma_alloc(pool))
+ return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+ switch (caching) {
+ case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
+ if (ttm_pool_uses_dma32(pool))
+ return &global_dma32_write_combined[order];
+
+ return &global_write_combined[order];
+ case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
+ if (ttm_pool_uses_dma32(pool))
+ return &global_dma32_uncached[order];
+
+ return &global_uncached[order];
+ default:
+ break;
+ }
+#endif
+
+ return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+ struct ttm_pool_type *pt;
+ unsigned int num_pages;
+ struct page *p;
+
+ down_read(&pool_shrink_rwsem);
+ spin_lock(&shrinker_lock);
+ pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ p = ttm_pool_type_take(pt);
+ if (p) {
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ num_pages = 1 << pt->order;
+ } else {
+ num_pages = 0;
+ }
+ up_read(&pool_shrink_rwsem);
+
+ return num_pages;
+}
+
+/* Return the allocation order for a page */
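+/* The order was stashed at allocation time: in page->private for plain
+ * alloc_pages_node() pages, or in the low bits of ttm_pool_dma.vaddr for
+ * dma_alloc_attrs() pages (see ttm_pool_alloc_page()).
+ */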
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+ if (ttm_pool_uses_dma_alloc(pool)) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ return dma->vaddr & ~PAGE_MASK;
+ }
+
+ return p->private;
+}
+
+/*
+ * Split larger pages so that we can free each PAGE_SIZE page as soon
+ * as it has been backed up, in order to avoid memory pressure during
+ * reclaim.
+ */
+static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
+{
+ unsigned int order = ttm_pool_page_order(pool, p);
+ pgoff_t nr;
+
+ if (!order)
+ return;
+
+ split_page(p, order);
+ nr = 1UL << order;
+ while (nr--)
+ (p++)->private = 0;
+}
+
+/**
+ * DOC: Partial backup and restoration of a struct ttm_tt.
+ *
+ * Swapout using ttm_backup_backup_page() and swapin using
+ * ttm_backup_copy_page() may fail.
+ * The former most likely due to lack of swap-space or memory, the latter due
+ * to lack of memory or because of signal interruption during waits.
+ *
+ * Backup failure is easily handled by using a ttm_tt pages vector that holds
+ * both backup handles and page pointers. This has to be taken into account when
+ * restoring such a ttm_tt from backup, and when freeing it while backed up.
+ * When restoring, for simplicity, new pages are actually allocated from the
+ * pool and the contents of any old pages are copied in and then the old pages
+ * are released.
+ *
+ * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
+ * to be able to resume an interrupted restore, and that structure is freed once
+ * the restoration is complete. If the struct ttm_tt is destroyed while there
+ * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
+ * care of.
+ */
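+
+/*
+ * Hedged usage sketch, not part of this patch; pool, tt and ctx are
+ * stand-ins for whatever the driver passes around:
+ *
+ *	struct ttm_backup_flags flags = { .purge = false, .writeback = true };
+ *	long nr = ttm_pool_backup(pool, tt, &flags);
+ *
+ * nr is the number of pages backed up. Later, to read the content back in
+ * (retryable on failure thanks to the resume state kept in tt->restore):
+ *
+ *	err = ttm_pool_restore_and_alloc(pool, tt, &ctx);
+ *
+ * or, to discard the backed-up content without restoring it:
+ *
+ *	ttm_pool_drop_backed_up(tt);
+ */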
+
+/* Is restore ongoing for the currently allocated page? */
+static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
+{
+ return restore && restore->restored_pages < (1 << restore->order);
+}
+
+/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
+static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
+ const dma_addr_t *dma_addr, enum ttm_caching caching)
+{
+ struct ttm_pool_type *pt = NULL;
+ unsigned int order;
+ pgoff_t nr;
+
+ if (pool) {
+ order = ttm_pool_page_order(pool, page);
+ nr = (1UL << order);
+ if (dma_addr)
+ ttm_pool_unmap(pool, *dma_addr, nr);
+
+ pt = ttm_pool_select_type(pool, caching, order);
+ } else {
+ order = page->private;
+ nr = (1UL << order);
+ }
+
+ if (pt)
+ ttm_pool_type_give(pt, page);
+ else
+ ttm_pool_free_page(pool, caching, order, page);
+
+ return nr;
+}
+
+/* Populate the page-array using the most recently allocated multi-order page. */
+static void ttm_pool_allocated_page_commit(struct page *allocated,
+ dma_addr_t first_dma,
+ struct ttm_pool_alloc_state *alloc,
+ pgoff_t nr)
+{
+ pgoff_t i;
+
+ for (i = 0; i < nr; ++i)
+ *alloc->pages++ = allocated++;
+
+ alloc->remaining_pages -= nr;
+
+ if (!alloc->dma_addr)
+ return;
+
+ for (i = 0; i < nr; ++i) {
+ *alloc->dma_addr++ = first_dma;
+ first_dma += PAGE_SIZE;
+ }
+}
+
+/*
+ * When restoring, restore backed-up content to the newly allocated page and
+ * if successful, populate the page-table and dma-address arrays.
+ */
+static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
+ struct file *backup,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc)
+
+{
+ pgoff_t i, nr = 1UL << restore->order;
+ struct page **first_page = alloc->pages;
+ struct page *p;
+ int ret = 0;
+
+ for (i = restore->restored_pages; i < nr; ++i) {
+ p = first_page[i];
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
+ should_fail(&backup_fault_inject, 1)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (handle == 0) {
+ restore->restored_pages++;
+ continue;
+ }
+
+ ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
+ handle, ctx->interruptible);
+ if (ret)
+ break;
+
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ /*
+ * We could probably avoid splitting the old page
+ * using clever logic, but ATM we don't care, as
+ * we prioritize releasing memory ASAP. Note that
+ * here, the old retained page is always write-back
+ * cached.
+ */
+ ttm_pool_split_for_swap(restore->pool, p);
+ copy_highpage(restore->alloced_page + i, p);
+ __free_pages(p, 0);
+ }
+
+ restore->restored_pages++;
+ first_page[i] = ttm_backup_handle_to_page_ptr(0);
+ }
+
+ if (ret) {
+ if (!restore->restored_pages) {
+ dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = nr;
+ }
+ return ret;
+ }
+
+ ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
+ alloc, nr);
+ if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
+ alloc->caching_divide = alloc->pages;
+ restore->snapshot_alloc = *alloc;
+ restore->alloced_pages += nr;
+
+ return 0;
+}
+
+/* If restoring, save information needed for ttm_pool_restore_commit(). */
+static void
+ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
+ struct page *p,
+ enum ttm_caching page_caching,
+ dma_addr_t first_dma,
+ struct ttm_pool_tt_restore *restore,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ restore->pool = pool;
+ restore->order = order;
+ restore->restored_pages = 0;
+ restore->page_caching = page_caching;
+ restore->first_dma = first_dma;
+ restore->alloced_page = p;
+ restore->snapshot_alloc = *alloc;
+}
+
+/*
+ * Called when we've got a page, either from a pool or newly allocated.
+ * If needed, dma map the page and populate the dma-address array.
+ * Populate the page address array.
+ * If the caching is consistent, update any deferred caching. Otherwise
+ * stage this page for an upcoming deferred caching update.
+ */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ struct page *p, enum ttm_caching page_caching,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
+{
+ bool caching_consistent;
+ dma_addr_t first_dma;
+ int r = 0;
+
+ caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
+
+ if (caching_consistent) {
+ r = ttm_pool_apply_caching(alloc);
+ if (r)
+ return r;
+ }
+
+ if (alloc->dma_addr) {
+ r = ttm_pool_map(pool, order, p, &first_dma);
+ if (r)
+ return r;
+ }
+
+ if (restore) {
+ ttm_pool_page_allocated_restore(pool, order, p, page_caching,
+ first_dma, restore, alloc);
+ } else {
+ ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
+
+ if (caching_consistent)
+ alloc->caching_divide = alloc->pages;
+ }
+
+ return 0;
+}
+
+/**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+{
+ struct page **pages = &tt->pages[start_page];
+ struct file *backup = tt->backup;
+ pgoff_t i, nr;
+
+ for (i = start_page; i < end_page; i += nr, pages += nr) {
+ struct page *p = *pages;
+
+ nr = 1;
+ if (ttm_backup_page_ptr_is_handle(p)) {
+ unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+
+ if (handle != 0)
+ ttm_backup_drop(backup, handle);
+ } else if (p) {
+ dma_addr_t *dma_addr = tt->dma_address ?
+ tt->dma_address + i : NULL;
+
+ nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
+ }
+ }
+}
+
+static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
+ struct ttm_pool_alloc_state *alloc)
+{
+ alloc->pages = tt->pages;
+ alloc->caching_divide = tt->pages;
+ alloc->dma_addr = tt->dma_address;
+ alloc->remaining_pages = tt->num_pages;
+ alloc->tt_caching = tt->caching;
+}
+
+/*
+ * Find a suitable allocation order based on highest desired order
+ * and number of remaining pages
+ */
+static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
+ const struct ttm_pool_alloc_state *alloc)
+{
+ return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
+}
+
+static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx,
+ struct ttm_pool_alloc_state *alloc,
+ struct ttm_pool_tt_restore *restore)
+{
+ enum ttm_caching page_caching;
+ gfp_t gfp_flags = GFP_USER;
+ pgoff_t caching_divide;
+ unsigned int order;
+ bool allow_pools;
+ struct page *p;
+ int r;
+
+ WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
+ WARN_ON(alloc->dma_addr && !pool->dev);
+
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (ctx->gfp_retry_mayfail)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+ if (ttm_pool_uses_dma32(pool))
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+ page_caching = tt->caching;
+ allow_pools = true;
+ for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
+ alloc->remaining_pages;
+ order = ttm_pool_alloc_find_order(order, alloc)) {
+ struct ttm_pool_type *pt;
+
+ /* First, try to allocate a page from a pool if one exists. */
+ p = NULL;
+ pt = ttm_pool_select_type(pool, page_caching, order);
+ if (pt && allow_pools)
+ p = ttm_pool_type_take(pt);
+ /*
+ * If that fails or previously failed, allocate from system.
+ * Note that this also disallows additional pool allocations using
+ * write-back cached pools of the same order. Consider removing
+ * that behaviour.
+ */
+ if (!p) {
+ page_caching = ttm_cached;
+ allow_pools = false;
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
+ }
+ /* If that fails, lower the order if possible and retry. */
+ if (!p) {
+ if (order) {
+ --order;
+ page_caching = tt->caching;
+ allow_pools = true;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
+ r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
+ restore);
+ if (r)
+ goto error_free_page;
+
+ if (ttm_pool_restore_valid(restore)) {
+ r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
+ if (r)
+ goto error_free_all;
+ }
+ }
+
+ r = ttm_pool_apply_caching(alloc);
+ if (r)
+ goto error_free_all;
+
+ kfree(tt->restore);
+ tt->restore = NULL;
+
+ return 0;
+
+error_free_page:
+ ttm_pool_free_page(pool, page_caching, order, p);
+
+error_free_all:
+ if (tt->restore)
+ return r;
+
+ caching_divide = alloc->caching_divide - tt->pages;
+ ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+ ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
+ tt->num_pages - alloc->remaining_pages);
+
+ return r;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
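+
+/*
+ * Minimal populate-path sketch (hedged; "bdev" and "tt" are assumed to come
+ * from the driver, with the ttm_device embedding its default pool):
+ *
+ *	struct ttm_operation_ctx ctx = { .interruptible = true };
+ *	int err = ttm_pool_alloc(&bdev->pool, tt, &ctx);
+ *
+ * On error, __ttm_pool_alloc() has already returned any partially allocated
+ * ranges, so the caller only needs to propagate err.
+ */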
+
+/**
+ * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
+ * content.
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary. Read in backed-up content.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_pool_alloc_state alloc;
+
+ if (WARN_ON(!ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if (!tt->restore) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+
+ ttm_pool_alloc_state_init(tt, &alloc);
+ if (ctx->gfp_retry_mayfail)
+ gfp |= __GFP_RETRY_MAYFAIL;
+
+ tt->restore = kzalloc(sizeof(*tt->restore), gfp);
+ if (!tt->restore)
+ return -ENOMEM;
+
+ tt->restore->snapshot_alloc = alloc;
+ tt->restore->pool = pool;
+ tt->restore->restored_pages = 1;
+ } else {
+ struct ttm_pool_tt_restore *restore = tt->restore;
+ int ret;
+
+ alloc = restore->snapshot_alloc;
+ if (ttm_pool_restore_valid(tt->restore)) {
+ ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ret)
+ return ret;
+ }
+ if (!alloc.remaining_pages)
+ return 0;
+ }
+
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+}
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them.
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+ ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
+
+ while (atomic_long_read(&allocated_pages) > page_pool_size)
+ ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
+
+/**
+ * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
+ * @tt: The struct ttm_tt.
+ *
+ * Release handles with associated content or any remaining pages of
+ * a backed-up struct ttm_tt.
+ */
+void ttm_pool_drop_backed_up(struct ttm_tt *tt)
+{
+ struct ttm_pool_tt_restore *restore;
+ pgoff_t start_page = 0;
+
+ WARN_ON(!ttm_tt_is_backed_up(tt));
+
+ restore = tt->restore;
+
+ /*
+ * Unmap and free any uncommitted restore page.
+ * Any tt page-array backup entries that have already been
+ * read back have been cleared at this point.
+ */
+ if (ttm_pool_restore_valid(restore)) {
+ dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
+
+ ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
+ dma_addr, restore->page_caching);
+ restore->restored_pages = 1UL << restore->order;
+ }
+
+ /*
+ * If a restore is ongoing, part of the tt pages may have a
+ * caching different than writeback.
+ */
+ if (restore) {
+ pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
+
+ start_page = restore->alloced_pages;
+ WARN_ON(mid > start_page);
+ /* Pages that might be dma-mapped and non-cached */
+ ttm_pool_free_range(restore->pool, tt, tt->caching,
+ 0, mid);
+ /* Pages that might be dma-mapped but cached */
+ ttm_pool_free_range(restore->pool, tt, ttm_cached,
+ mid, restore->alloced_pages);
+ kfree(restore);
+ tt->restore = NULL;
+ }
+
+ ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
+}
+
+/**
+ * ttm_pool_backup() - Back up or purge a struct ttm_tt
+ * @pool: The pool used when allocating the struct ttm_tt.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags to govern the backup behaviour.
+ *
+ * Back up or purge a struct ttm_tt. If @flags->purge is true, then
+ * all pages will be freed directly to the system rather than to the pool
+ * they were allocated from, making the function behave similarly to
+ * ttm_pool_free(). If @flags->purge is false, the pages will be backed up
+ * instead, exchanged for handles.
+ * A subsequent call to ttm_pool_restore_and_alloc() will then read back the
+ * content, and a subsequent call to ttm_pool_drop_backed_up() will drop it.
+ * If backup of a page fails for whatever reason, @tt will still be
+ * partially backed up, retaining those pages for which backup fails.
+ * In that case, this function can be retried, possibly after freeing up
+ * memory resources.
+ *
+ * Return: Number of pages actually backed up or freed, or negative
+ * error code on error.
+ */
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_backup_flags *flags)
+{
+ struct file *backup = tt->backup;
+ struct page *page;
+ unsigned long handle;
+ gfp_t alloc_gfp;
+ gfp_t gfp;
+ int ret = 0;
+ pgoff_t shrunken = 0;
+ pgoff_t i, num_pages;
+
+ if (WARN_ON(ttm_tt_is_backed_up(tt)))
+ return -EINVAL;
+
+ if ((!ttm_backup_bytes_avail() && !flags->purge) ||
+ ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
+ return -EBUSY;
+
+#ifdef CONFIG_X86
+ /* Anything returned to the system needs to be cached. */
+ if (tt->caching != ttm_cached)
+ set_pages_array_wb(tt->pages, tt->num_pages);
+#endif
+
+ if (tt->dma_address || flags->purge) {
+ for (i = 0; i < tt->num_pages; i += num_pages) {
+ unsigned int order;
+
+ page = tt->pages[i];
+ if (unlikely(!page)) {
+ num_pages = 1;
+ continue;
+ }
+
+ order = ttm_pool_page_order(pool, page);
+ num_pages = 1UL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i],
+ num_pages);
+ if (flags->purge) {
+ shrunken += num_pages;
+ page->private = 0;
+ __free_pages(page, order);
+ memset(tt->pages + i, 0,
+ num_pages * sizeof(*tt->pages));
+ }
+ }
+ }
+
+ if (flags->purge)
+ return shrunken;
+
+ if (ttm_pool_uses_dma32(pool))
+ gfp = GFP_DMA32;
+ else
+ gfp = GFP_HIGHUSER;
+
+ alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;
+
+ num_pages = tt->num_pages;
+
+ /* Emulate fault injection by shrinking only half of the pages. */
+ if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
+ num_pages = DIV_ROUND_UP(num_pages, 2);
+
+ for (i = 0; i < num_pages; ++i) {
+ s64 shandle;
+
+ page = tt->pages[i];
+ if (unlikely(!page))
+ continue;
+
+ ttm_pool_split_for_swap(pool, page);
+
+ shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
+ gfp, alloc_gfp);
+ if (shandle < 0) {
+ /* We allow partially shrunken tts */
+ ret = shandle;
+ break;
+ }
+ handle = shandle;
+ tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
+ put_page(page);
+ shrunken++;
+ }
+
+ return shrunken ? shrunken : ret;
+}
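+
+/*
+ * For reference, purging instead of backing up frees straight to the system
+ * (sketch with hypothetical pool/tt pointers):
+ *
+ *	struct ttm_backup_flags flags = { .purge = true };
+ *	long freed = ttm_pool_backup(pool, tt, &flags);
+ */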
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @nid: NUMA node to use for allocations
+ * @alloc_flags: TTM_ALLOCATION_POOL_* flags
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ int nid, unsigned int alloc_flags)
+{
+ unsigned int i, j;
+
+ WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));
+
+ pool->dev = dev;
+ pool->nid = nid;
+ pool->alloc_flags = alloc_flags;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
+ }
+}
+EXPORT_SYMBOL(ttm_pool_init);
+
+/**
+ * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is useful to guarantee that all shrinker invocations have seen an
+ * update before freeing memory, similar to RCU.
+ */
+static void ttm_pool_synchronize_shrinkers(void)
+{
+ down_write(&pool_shrink_rwsem);
+ up_write(&pool_shrink_rwsem);
+}
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
+ */
+void ttm_pool_fini(struct ttm_pool *pool)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
+ }
+
+ /* We removed the pool types from the LRU, but we need to also make sure
+ * that no shrinker is concurrently freeing pages from the pool.
+ */
+ ttm_pool_synchronize_shrinkers();
+}
+EXPORT_SYMBOL(ttm_pool_fini);
+
+/* Shrinker batch: free roughly the average number of pages a pool holds. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
+
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_freed = 0;
+
+ do
+ num_freed += ttm_pool_shrink();
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
+
+ return num_freed ?: SHRINK_STOP;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
+
+/* Print a nice header for the order */
+static void ttm_pool_debugfs_header(struct seq_file *m)
+{
+ unsigned int i;
+
+ seq_puts(m, "\t ");
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+}
+
+/* Dump information about the different pool types */
+static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+ struct seq_file *m)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
+ seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+ seq_puts(m, "\n");
+}
+
+/* Dump the total number of allocated pages */
+static void ttm_pool_debugfs_footer(struct seq_file *m)
+{
+ seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+ atomic_long_read(&allocated_pages), page_pool_size);
+}
+
+/* Dump the information for the global pools */
+static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
+{
+ ttm_pool_debugfs_header(m);
+
+ spin_lock(&shrinker_lock);
+ seq_puts(m, "wc\t:");
+ ttm_pool_debugfs_orders(global_write_combined, m);
+ seq_puts(m, "uc\t:");
+ ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+ spin_unlock(&shrinker_lock);
+
+ ttm_pool_debugfs_footer(m);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
+
+/**
+ * ttm_pool_debugfs - Debugfs dump function for a pool
+ *
+ * @pool: the pool to dump the information for
+ * @m: seq_file to dump to
+ *
+ * Make a debugfs dump with the per pool and global information.
+ */
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
+{
+ unsigned int i;
+
+ if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
+ seq_puts(m, "unused\n");
+ return 0;
+ }
+
+ ttm_pool_debugfs_header(m);
+
+ spin_lock(&shrinker_lock);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (ttm_pool_uses_dma_alloc(pool))
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
+ switch (i) {
+ case ttm_cached:
+ seq_puts(m, "\t:");
+ break;
+ case ttm_write_combined:
+ seq_puts(m, "wc\t:");
+ break;
+ case ttm_uncached:
+ seq_puts(m, "uc\t:");
+ break;
+ }
+ ttm_pool_debugfs_orders(pool->caching[i].orders, m);
+ }
+ spin_unlock(&shrinker_lock);
+
+ ttm_pool_debugfs_footer(m);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_debugfs);
+
+/* Test the shrinker functions and dump the result */
+static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
+{
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
+ ttm_pool_shrinker_scan(mm_shrinker, &sc));
+ fs_reclaim_release(GFP_KERNEL);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
+
+#endif
+
+/**
+ * ttm_pool_mgr_init - Initialize globals
+ *
+ * @num_pages: default number of pages
+ *
+ * Initialize the global locks and lists for the MM shrinker.
+ */
+int ttm_pool_mgr_init(unsigned long num_pages)
+{
+ unsigned int i;
+
+ if (!page_pool_size)
+ page_pool_size = num_pages;
+
+ spin_lock_init(&shrinker_lock);
+ INIT_LIST_HEAD(&shrinker_list);
+
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
+ ttm_pool_type_init(&global_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+
+ ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_dma32_uncached[i], NULL,
+ ttm_uncached, i);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_globals_fops);
+ debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_shrink_fops);
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
+ &backup_fault_inject);
+#endif
+#endif
+
+ mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+ if (!mm_shrinker)
+ return -ENOMEM;
+
+ mm_shrinker->count_objects = ttm_pool_shrinker_count;
+ mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
+ mm_shrinker->seeks = 1;
+
+ shrinker_register(mm_shrinker);
+
+ return 0;
+}
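+
+/*
+ * ttm_pool_mgr_init() is expected to be called once at TTM setup time and
+ * paired with ttm_pool_mgr_fini(); a hedged sketch, where the
+ * default_pool_pages count is hypothetical:
+ *
+ *	ret = ttm_pool_mgr_init(default_pool_pages);
+ *	...
+ *	ttm_pool_mgr_fini();
+ */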
+
+/**
+ * ttm_pool_mgr_fini - Finalize globals
+ *
+ * Cleanup the global pools and unregister the MM shrinker.
+ */
+void ttm_pool_mgr_fini(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
+ ttm_pool_type_fini(&global_write_combined[i]);
+ ttm_pool_type_fini(&global_uncached[i]);
+
+ ttm_pool_type_fini(&global_dma32_write_combined[i]);
+ ttm_pool_type_fini(&global_dma32_uncached[i]);
+ }
+
+ shrinker_free(mm_shrinker);
+ WARN_ON(!list_empty(&shrinker_list));
+}
diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
new file mode 100644
index 000000000000..82c4b7e56a99
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_POOL_INTERNAL_H_
+#define _TTM_POOL_INTERNAL_H_
+
+#include <drm/ttm/ttm_allocation.h>
+#include <drm/ttm/ttm_pool.h>
+
+static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
+}
+
+static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
+}
+
+static inline bool ttm_pool_beneficial_order(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & 0xff;
+}
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
new file mode 100644
index 000000000000..db854b581d83
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/drm_mm.h>
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/*
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+static inline struct ttm_range_manager *
+to_range_manager(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct ttm_range_manager, manager);
+}
+
+static int ttm_range_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct ttm_range_manager *rman = to_range_manager(man);
+ struct ttm_range_mgr_node *node;
+ struct drm_mm *mm = &rman->mm;
+ enum drm_mm_insert_mode mode;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
+
+ ttm_resource_init(bo, place, &node->base);
+
+ spin_lock(&rman->lock);
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ PFN_UP(node->base.size),
+ bo->page_alignment, 0,
+ place->fpfn, lpfn, mode);
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ ttm_resource_fini(man, &node->base);
+ kfree(node);
+ return ret;
+ }
+
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
+ return 0;
+}
+
+static void ttm_range_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+ struct ttm_range_manager *rman = to_range_manager(man);
+
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
+
+ ttm_resource_fini(man, res);
+ kfree(node);
+}
+
+static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ /* Don't evict BOs outside of the requested placement range */
+ if (place->fpfn >= (node->start + num_pages) ||
+ (place->lpfn && place->lpfn <= node->start))
+ return false;
+
+ return true;
+}
+
+static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ if (node->start < place->fpfn ||
+ (place->lpfn && (node->start + num_pages) > place->lpfn))
+ return false;
+
+ return true;
+}
+
+static void ttm_range_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct ttm_range_manager *rman = to_range_manager(man);
+
+ spin_lock(&rman->lock);
+ drm_mm_print(&rman->mm, printer);
+ spin_unlock(&rman->lock);
+}
+
+static const struct ttm_resource_manager_func ttm_range_manager_func = {
+ .alloc = ttm_range_man_alloc,
+ .free = ttm_range_man_free,
+ .intersects = ttm_range_man_intersects,
+ .compatible = ttm_range_man_compatible,
+ .debug = ttm_range_man_debug
+};
+
+/**
+ * ttm_range_man_init_nocheck - Initialise a generic range manager for the
+ * selected memory type.
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * The range manager is installed for this device in the type slot.
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_range_manager *rman;
+
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
+ return -ENOMEM;
+
+ man = &rman->manager;
+ man->use_tt = use_tt;
+
+ man->func = &ttm_range_manager_func;
+
+ ttm_resource_manager_init(man, bdev, p_size);
+
+ drm_mm_init(&rman->mm, 0, p_size);
+ spin_lock_init(&rman->lock);
+
+ ttm_set_driver_manager(bdev, type, &rman->manager);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_range_man_init_nocheck);
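+
+/*
+ * Typical driver-side sketch (hedged): install a range manager for VRAM,
+ * sized in pages; vram_pages is a hypothetical value.
+ *
+ *	ret = ttm_range_man_init_nocheck(bdev, TTM_PL_VRAM, false, vram_pages);
+ *	...
+ *	ret = ttm_range_man_fini_nocheck(bdev, TTM_PL_VRAM);
+ */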
+
+/**
+ * ttm_range_man_fini_nocheck - Remove the generic range manager from a slot
+ * and tear it down.
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Return: %0 on success or a negative error code on failure
+ */
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
+ unsigned type)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
+ struct ttm_range_manager *rman = to_range_manager(man);
+ struct drm_mm *mm = &rman->mm;
+ int ret;
+
+ if (!man)
+ return 0;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(bdev, man);
+ if (ret)
+ return ret;
+
+ spin_lock(&rman->lock);
+ drm_mm_takedown(mm);
+ spin_unlock(&rman->lock);
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(bdev, type, NULL);
+ kfree(rman);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
new file mode 100644
index 000000000000..f5aa29dc6ec0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io-mapping.h>
+#include <linux/iosys-map.h>
+#include <linux/scatterlist.h>
+#include <linux/cgroup_dmem.h>
+
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Detach all cursors from a bulk move list that should be empty */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_init() - Initialize a struct ttm_resource_cursor
+ * @cursor: The cursor to initialize.
+ * @man: The resource manager.
+ *
+ * Initialize the cursor before using it for iteration.
+ */
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man)
+{
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ INIT_LIST_HEAD(&cursor->hitch.link);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can be called multiple times after each other.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_lru_bulk_move_init - initialize a bulk move structure
+ * @bulk: the structure to init
+ *
+ * For now just memset the structure to zero.
+ */
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
+{
+ memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_init);
+
+/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
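+
+/*
+ * Hedged lifecycle sketch; "vm" stands in for the driver object owning the
+ * bulk move, and BOs are typically attached via ttm_bo_set_bulk_move():
+ *
+ *	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+ *	...
+ *	spin_lock(&bdev->lru_lock);
+ *	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ *	spin_unlock(&bdev->lru_lock);
+ *	...
+ *	ttm_lru_bulk_move_fini(bdev, &vm->lru_bulk_move);
+ */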
+
+/**
+ * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when the driver makes
+ * sure that the resource order never changes. Should be called with
+ * &ttm_device.lru_lock held.
+ */
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
+{
+ unsigned i, j;
+
+ ttm_bulk_move_adjust_cursors(bulk);
+ for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
+ struct ttm_resource_manager *man;
+
+ if (!pos->first)
+ continue;
+
+ lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
+ dma_resv_assert_held(pos->first->bo->base.resv);
+ dma_resv_assert_held(pos->last->bo->base.resv);
+
+ man = ttm_manager_type(pos->first->bo->bdev, i);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
+ }
+ }
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
+
+/* Return the bulk move pos object for this resource */
+static struct ttm_lru_bulk_move_pos *
+ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
+{
+ return &bulk->pos[res->mem_type][res->bo->priority];
+}
+
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Move the resource to the tail of the bulk move range */
+static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
+ struct ttm_resource *res)
+{
+ if (pos->last != res) {
+ if (pos->first == res)
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
+ pos->last = res;
+ }
+}
+
+/* Add the resource to a bulk_move range */
+static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (!pos->first) {
+ pos->first = res;
+ pos->last = res;
+ } else {
+ WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ }
+}
+
+/* Remove the resource from a bulk_move range */
+static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+ (pos->first == res && pos->last == res))) {
+ pos->first = NULL;
+ pos->last = NULL;
+ } else if (pos->first == res) {
+ pos->first = ttm_lru_next_res(res);
+ } else if (pos->last == res) {
+ pos->last = ttm_lru_prev_res(res);
+ } else {
+ list_move(&res->lru.link, &pos->last->lru.link);
+ }
+}
+
+static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ /*
+ * Take care when creating a new resource for a bo: it must not be
+ * considered swapped if it's not the bo's current resource, since only
+ * the current resource is logically associated with the ttm_tt. Think of
+ * a VRAM resource created to move a swapped-out bo to VRAM.
+ */
+ if (bo->resource != res || !bo->ttm)
+ return false;
+
+ dma_resv_assert_held(bo->base.resv);
+ return ttm_tt_is_swapped(bo->ttm);
+}
+
+static bool ttm_resource_unevictable(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+ return bo->pin_count || ttm_resource_is_swapped(res, bo);
+}
+
+/* Add the resource to a bulk move if the BO is configured for it */
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
+ ttm_lru_bulk_move_add(bo->bulk_move, res);
+}
+
+/* Remove the resource from a bulk move if the BO is configured for it */
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !ttm_resource_unevictable(res, bo))
+ ttm_lru_bulk_move_del(bo->bulk_move, res);
+}
+
+/* Move a resource to the LRU or bulk tail */
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
+{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
+
+ lockdep_assert_held(&bo->bdev->lru_lock);
+
+ if (ttm_resource_unevictable(res, bo)) {
+ list_move_tail(&res->lru.link, &bdev->unevictable);
+
+ } else if (bo->bulk_move) {
+ struct ttm_lru_bulk_move_pos *pos =
+ ttm_lru_bulk_move_pos(bo->bulk_move, res);
+
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ } else {
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
+ }
+}
+
+/**
+ * ttm_resource_init - resource object constructor
+ * @bo: buffer object this resource is allocated for
+ * @place: placement of the resource
+ * @res: the resource object to initialize
+ *
+ * Initialize a new resource object. Counterpart of ttm_resource_fini().
+ */
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res)
+{
+ struct ttm_resource_manager *man;
+
+ res->start = 0;
+ res->size = bo->base.size;
+ res->mem_type = place->mem_type;
+ res->placement = place->flags;
+ res->bus.addr = NULL;
+ res->bus.offset = 0;
+ res->bus.is_iomem = false;
+ res->bus.caching = ttm_cached;
+ res->bo = bo;
+
+ man = ttm_manager_type(bo->bdev, place->mem_type);
+ spin_lock(&bo->bdev->lru_lock);
+ if (ttm_resource_unevictable(res, bo))
+ list_add_tail(&res->lru.link, &bo->bdev->unevictable);
+ else
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
+ man->usage += res->size;
+ spin_unlock(&bo->bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_resource_init);
+
+/**
+ * ttm_resource_fini - resource destructor
+ * @man: the resource manager this resource belongs to
+ * @res: the resource to clean up
+ *
+ * Should be used by resource manager backends to clean up the TTM resource
+ * objects before freeing the underlying structure. Makes sure the resource is
+ * removed from the LRU before destruction.
+ * Counterpart of ttm_resource_init().
+ */
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_device *bdev = man->bdev;
+
+ spin_lock(&bdev->lru_lock);
+ list_del_init(&res->lru.link);
+ man->usage -= res->size;
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_resource_fini);
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res_ptr,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
+{
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, place->mem_type);
+ struct dmem_cgroup_pool_state *pool = NULL;
+ int ret;
+
+ if (man->cg) {
+ ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
+ if (ret)
+ return ret;
+ }
+
+ ret = man->func->alloc(man, bo, place, res_ptr);
+ if (ret) {
+ if (pool)
+ dmem_cgroup_uncharge(pool, bo->base.size);
+ return ret;
+ }
+
+ (*res_ptr)->css = pool;
+
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(*res_ptr, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
+{
+ struct ttm_resource_manager *man;
+ struct dmem_cgroup_pool_state *pool;
+
+ if (!*res)
+ return;
+
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(*res, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+
+ pool = (*res)->css;
+ man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+ man->func->free(man, *res);
+ *res = NULL;
+ if (man->cg)
+ dmem_cgroup_uncharge(pool, bo->base.size);
+}
+EXPORT_SYMBOL(ttm_resource_free);
+
+/**
+ * ttm_resource_intersects - test for intersection
+ *
+ * @bdev: TTM device structure
+ * @res: The resource to test
+ * @place: The placement to test
+ * @size: How many bytes the new allocation needs.
+ *
+ * Test if @res intersects with @place and @size. Used for testing if evictions
+ * are valuable or not.
+ *
+ * Returns true if the res placement intersects with @place and @size.
+ */
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct ttm_resource_manager *man;
+
+ if (!res)
+ return false;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (!place || !man->func->intersects)
+ return true;
+
+ return man->func->intersects(man, res, place, size);
+}
+
+/**
+ * ttm_resource_compatible - check if resource is compatible with placement
+ *
+ * @res: the resource to check
+ * @placement: the placement to check against
+ * @evicting: true if the caller is doing evictions
+ *
+ * Returns true if the placement is compatible.
+ */
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement,
+ bool evicting)
+{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
+ unsigned i;
+
+ if (res->placement & TTM_PL_FLAG_TEMPORARY)
+ return false;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ const struct ttm_place *place = &placement->placement[i];
+ struct ttm_resource_manager *man;
+
+ if (res->mem_type != place->mem_type)
+ continue;
+
+ if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
+ TTM_PL_FLAG_FALLBACK))
+ continue;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
+ !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
+ continue;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (man->func->compatible &&
+ !man->func->compatible(man, res, place, bo->base.size))
+ continue;
+
+ return true;
+ }
+ return false;
+}
+
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ res->bo = bo;
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+/**
+ * ttm_resource_manager_init
+ *
+ * @man: memory manager object to init
+ * @bdev: ttm device this manager belongs to
+ * @size: size of managed resources in arbitrary units
+ *
+ * Initialise core parts of a manager object.
+ */
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
+ uint64_t size)
+{
+ unsigned i;
+
+ man->bdev = bdev;
+ man->size = size;
+ man->usage = 0;
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ INIT_LIST_HEAD(&man->lru[i]);
+ spin_lock_init(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ man->eviction_fences[i] = NULL;
+}
+EXPORT_SYMBOL(ttm_resource_manager_init);
+
+/**
+ * ttm_resource_manager_evict_all
+ *
+ * @bdev: device to use
+ * @man: manager to use
+ *
+ * Evict all the objects out of a memory manager until it is empty.
+ * Part of memory manager cleanup sequence.
+ */
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
+ struct ttm_resource_manager *man)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ };
+ struct dma_fence *fence;
+ int ret, i;
+
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
+
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ ret = 0;
+
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (fence && !dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
+ spin_unlock(&man->eviction_lock);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ if (ret)
+ return ret;
+ spin_lock(&man->eviction_lock);
+ }
+ }
+ spin_unlock(&man->eviction_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_resource_manager_evict_all);
+
+/**
+ * ttm_resource_manager_usage
+ *
+ * @man: A memory manager object.
+ *
+ * Return how much of the managed resource space is currently used, in bytes.
+ */
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
+{
+ uint64_t usage;
+
+ if (WARN_ON_ONCE(!man->bdev))
+ return 0;
+
+ spin_lock(&man->bdev->lru_lock);
+ usage = man->usage;
+ spin_unlock(&man->bdev->lru_lock);
+ return usage;
+}
+EXPORT_SYMBOL(ttm_resource_manager_usage);
+
+/**
+ * ttm_resource_manager_debug
+ *
+ * @man: manager type to dump.
+ * @p: printer to use for debug.
+ */
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+ struct drm_printer *p)
+{
+ drm_printf(p, " use_type: %d\n", man->use_type);
+ drm_printf(p, " use_tt: %d\n", man->use_tt);
+ drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
+ if (man->func->debug)
+ man->func->debug(man, p);
+}
+EXPORT_SYMBOL(ttm_resource_manager_debug);
+
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
+/**
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
+ * @cursor: cursor to record the position
+ *
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor)
+{
+ struct ttm_resource_manager *man = cursor->man;
+
+ if (WARN_ON_ONCE(!man))
+ return NULL;
+
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ return ttm_resource_manager_next(cursor);
+}
+
+/**
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
+ * @cursor: cursor to record the position
+ *
+ * Return: the next resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
+{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ return NULL;
+}
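+
+/*
+ * Hedged iteration sketch; callers hold the LRU lock across the whole walk
+ * and must finalize the cursor when done:
+ *
+ *	struct ttm_resource_cursor cursor;
+ *	struct ttm_resource *res;
+ *
+ *	spin_lock(&bdev->lru_lock);
+ *	ttm_resource_cursor_init(&cursor, man);
+ *	for (res = ttm_resource_manager_first(&cursor); res;
+ *	     res = ttm_resource_manager_next(&cursor)) {
+ *		...
+ *	}
+ *	ttm_resource_cursor_fini(&cursor);
+ *	spin_unlock(&bdev->lru_lock);
+ */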
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
+
+ return NULL;
+}
+
+static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_iomap *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+ void __iomem *addr;
+
+retry:
+ while (i >= iter_io->cache.end) {
+ iter_io->cache.sg = iter_io->cache.sg ?
+ sg_next(iter_io->cache.sg) : iter_io->st->sgl;
+ iter_io->cache.i = iter_io->cache.end;
+ iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
+ PAGE_SHIFT;
+ iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
+ iter_io->start;
+ }
+
+ if (i < iter_io->cache.i) {
+ iter_io->cache.end = 0;
+ iter_io->cache.sg = NULL;
+ goto retry;
+ }
+
+ addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
+ (((resource_size_t)i - iter_io->cache.i)
+ << PAGE_SHIFT));
+ iosys_map_set_vaddr_iomem(dmap, addr);
+}
+
+static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *map)
+{
+ io_mapping_unmap_local(map->vaddr_iomem);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
+ .map_local = ttm_kmap_iter_iomap_map_local,
+ .unmap_local = ttm_kmap_iter_iomap_unmap_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
+ * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
+ * @iomap: The struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct
+ * ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start)
+{
+ iter_io->base.ops = &ttm_kmap_iter_io_ops;
+ iter_io->iomap = iomap;
+ iter_io->st = st;
+ iter_io->start = start;
+ memset(&iter_io->cache, 0, sizeof(iter_io->cache));
+
+ return &iter_io->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
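+
+/*
+ * Sketch with assumed identifiers: iomap maps an entire VRAM aperture, st
+ * describes the resource's pages within it, and vram_start is the DMA
+ * address corresponding to the start of iomap:
+ *
+ *	struct ttm_kmap_iter_iomap io_iter;
+ *	struct ttm_kmap_iter *iter =
+ *		ttm_kmap_iter_iomap_init(&io_iter, iomap, st, vram_start);
+ */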
+
+/**
+ * DOC: Linear io iterator
+ *
+ * This code should die in the not too distant future. Best would be if we could
+ * make io-mapping use memremap for all io memory, and have memremap
+ * implement a kmap_local functionality. We could then strip a huge amount of
+ * code. These linear io iterators are implemented to mimic old functionality,
+ * and they don't use kmap_local semantics at all internally. Rather, they use
+ * ioremap or friends, which at least on 32-bit add global TLB flushes and
+ * points of failure.
+ */
+
+static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_linear_io *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+
+ *dmap = iter_io->dmap;
+ iosys_map_incr(dmap, i * PAGE_SIZE);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
+ .map_local = ttm_kmap_iter_linear_io_map_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
+ * @iter_io: The iterator to initialize
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It sets up a memcpy kmap iterator
+ * pointing at a linear chunk of io memory.
+ *
+ * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
+ * failure.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ int ret;
+
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret)
+ goto out_err;
+ if (!mem->bus.is_iomem) {
+ ret = -EINVAL;
+ goto out_io_free;
+ }
+
+ if (mem->bus.addr) {
+ iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+ iter_io->needs_unmap = false;
+ } else {
+ iter_io->needs_unmap = true;
+ memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
+ if (mem->bus.caching == ttm_write_combined)
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap_wc(mem->bus.offset,
+ mem->size));
+ else if (mem->bus.caching == ttm_cached)
+ iosys_map_set_vaddr(&iter_io->dmap,
+ memremap(mem->bus.offset, mem->size,
+ MEMREMAP_WB |
+ MEMREMAP_WT |
+ MEMREMAP_WC));
+
+		/*
+		 * If uncached was requested, or if mapping cached or wc
+		 * memory failed, fall back to plain ioremap().
+		 */
+ if (iosys_map_is_null(&iter_io->dmap))
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap(mem->bus.offset,
+ mem->size));
+
+ if (iosys_map_is_null(&iter_io->dmap)) {
+ ret = -ENOMEM;
+ goto out_io_free;
+ }
+ }
+
+ iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
+ return &iter_io->base;
+
+out_io_free:
+ ttm_mem_io_free(bdev, mem);
+out_err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
+ * @iter_io: The iterator to clean up
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It cleans up a memcpy kmap
+ * iterator initialized by ttm_kmap_iter_linear_io_init.
+ */
+void
+ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
+ if (iter_io->dmap.is_iomem)
+ iounmap(iter_io->dmap.vaddr_iomem);
+ else
+ memunmap(iter_io->dmap.vaddr);
+ }
+
+ ttm_mem_io_free(bdev, mem);
+}
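+
+/*
+ * Init and fini are expected to bracket the actual copy; a sketch, with
+ * "bdev" and "mem" as in the functions above:
+ *
+ *	struct ttm_kmap_iter_linear_io io;
+ *	struct ttm_kmap_iter *iter;
+ *
+ *	iter = ttm_kmap_iter_linear_io_init(&io, bdev, mem);
+ *	if (IS_ERR(iter))
+ *		return PTR_ERR(iter);
+ *	... use iter for the move ...
+ *	ttm_kmap_iter_linear_io_fini(&io, bdev, mem);
+ */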
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int ttm_resource_manager_show(struct seq_file *m, void *unused)
+{
+ struct ttm_resource_manager *man =
+ (struct ttm_resource_manager *)m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ ttm_resource_manager_debug(man, &p);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);
+
+#endif
+
+/**
+ * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
+ * resource manager.
+ * @man: The TTM resource manager for which the debugfs stats file will be
+ * created
+ * @parent: debugfs directory in which the file will reside
+ * @name: The filename to create.
+ *
+ * This function sets up a debugfs file that can be used to look at debug
+ * statistics of the specified ttm_resource_manager.
+ */
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+					 struct dentry *parent,
+ const char *name)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
+#endif
+}
+EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
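+
+/*
+ * A driver would typically call this from its debugfs init; a sketch where
+ * "vram_man" and "minor->debugfs_root" stand in for the driver's manager
+ * and debugfs directory:
+ *
+ *	ttm_resource_manager_create_debugfs(vram_man, minor->debugfs_root,
+ *					    "vram_mm");
+ */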
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
new file mode 100644
index 000000000000..2ced169513cb
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/slab.h>
+
+#include "ttm_module.h"
+
+static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+ return 0;
+}
+
+static void ttm_sys_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ ttm_resource_fini(man, res);
+ kfree(res);
+}
+
+static const struct ttm_resource_manager_func ttm_sys_manager_func = {
+ .alloc = ttm_sys_man_alloc,
+ .free = ttm_sys_man_free,
+};
+
+void ttm_sys_man_init(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man = &bdev->sysman;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ man->use_tt = true;
+ man->func = &ttm_sys_manager_func;
+
+ ttm_resource_manager_init(man, bdev, 0);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+}
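+
+/*
+ * Driver-defined managers for other placements follow the same pattern; a
+ * sketch for a hypothetical VRAM manager ("my_vram_manager_func" and
+ * "vram_pages" are illustrative):
+ *
+ *	man->func = &my_vram_manager_func;
+ *	ttm_resource_manager_init(man, bdev, vram_pages);
+ *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
+ *	ttm_resource_manager_set_used(man, true);
+ */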
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52d4f2c..611d20ab966d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -30,174 +31,149 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
-#include <linux/file.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drm_cache.h>
-#include <drm/drm_mem_util.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
-/**
- * Allocates storage for pointers to the pages that back the ttm.
- */
-static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
-{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
-}
+#include "ttm_module.h"
+#include "ttm_pool_internal.h"
-static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
-{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
- ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
- sizeof(*ttm->dma_address));
-}
+static unsigned long ttm_pages_limit;
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- int ret = 0;
+MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
+module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);
- if (PageHighMem(p))
- return 0;
+static unsigned long ttm_dma32_pages_limit;
- if (c_old != tt_cached) {
- /* p isn't in the default caching state, set it to
- * writeback first to free its current memtype. */
+MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
+module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
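+
+/*
+ * Both limits can be overridden at module load time, e.g. with
+ * ttm.pages_limit=<n> on the kernel command line; when left at zero they
+ * are filled in with defaults by ttm_tt_mgr_init() below.
+ */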
- ret = set_pages_wb(p, 1);
- if (ret)
- return ret;
- }
-
- if (c_new == tt_wc)
- ret = set_memory_wc((unsigned long) page_address(p), 1);
- else if (c_new == tt_uncached)
- ret = set_pages_uc(p, 1);
-
- return ret;
-}
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- return 0;
-}
-#endif /* CONFIG_X86 */
+static atomic_long_t ttm_pages_allocated;
+static atomic_long_t ttm_dma32_pages_allocated;
/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
+ * Allocates a ttm structure for the given BO.
*/
-
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
- enum ttm_caching_state c_state)
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
- int i, j;
- struct page *cur_page;
- int ret;
+ struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
+ uint32_t page_flags = 0;
- if (ttm->caching_state == c_state)
- return 0;
+ dma_resv_assert_held(bo->base.resv);
- if (ttm->state == tt_unpopulated) {
- /* Change caching but don't populate */
- ttm->caching_state = c_state;
+ if (bo->ttm)
return 0;
- }
-
- if (ttm->caching_state == tt_cached)
- drm_clflush_pages(ttm->pages, ttm->num_pages);
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages[i];
- if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page,
- ttm->caching_state,
- c_state);
- if (unlikely(ret != 0))
- goto out_err;
- }
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
+ break;
+ case ttm_bo_type_kernel:
+ break;
+ case ttm_bo_type_sg:
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
+ break;
+ default:
+ pr_err("Illegal buffer object type\n");
+ return -EINVAL;
+ }
+ /*
+	 * When using dma_alloc_coherent with memory encryption, the
+	 * mapped TT pages need to be decrypted; otherwise the drivers
+	 * will end up sending encrypted memory to the GPU.
+ */
+ if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info_once(ddev, "TT memory decryption enabled.");
}
- ttm->caching_state = c_state;
-
- return 0;
+ bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
+ if (unlikely(bo->ttm == NULL))
+ return -ENOMEM;
-out_err:
- for (j = 0; j < i; ++j) {
- cur_page = ttm->pages[j];
- if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page, c_state,
- ttm->caching_state);
- }
- }
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
- return ret;
+ return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
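+
+/*
+ * The allocation itself happens in the driver's ttm_tt_create() hook; a
+ * minimal implementation (a sketch, "my_ttm_tt_create" is hypothetical)
+ * just wraps ttm_tt_init():
+ *
+ *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
+ *					       uint32_t page_flags)
+ *	{
+ *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ *
+ *		if (!tt)
+ *			return NULL;
+ *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
+ *			kfree(tt);
+ *			return NULL;
+ *		}
+ *		return tt;
+ *	}
+ */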
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+/*
+ * Allocates storage for pointers to the pages that back the ttm.
+ */
+static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- enum ttm_caching_state state;
-
- if (placement & TTM_PL_FLAG_WC)
- state = tt_wc;
- else if (placement & TTM_PL_FLAG_UNCACHED)
- state = tt_uncached;
- else
- state = tt_cached;
+	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void *), GFP_KERNEL);
+ if (!ttm->pages)
+ return -ENOMEM;
- return ttm_tt_set_caching(ttm, state);
+ return 0;
}
-EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-void ttm_tt_destroy(struct ttm_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- if (unlikely(ttm == NULL))
- return;
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address), GFP_KERNEL);
+ if (!ttm->pages)
+ return -ENOMEM;
- if (ttm->state == tt_bound) {
- ttm_tt_unbind(ttm);
- }
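+	/* The DMA addresses share the allocation, right after the page pointers: */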
+ ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
+ return 0;
+}
- if (likely(ttm->pages != NULL)) {
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
- }
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+ ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
+ GFP_KERNEL);
+ if (!ttm->dma_address)
+ return -ENOMEM;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
- ttm->swap_storage)
- fput(ttm->swap_storage);
+ return 0;
+}
- ttm->swap_storage = NULL;
- ttm->func->destroy(ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static void ttm_tt_init_fields(struct ttm_tt *ttm,
+ struct ttm_buffer_object *bo,
+ uint32_t page_flags,
+ enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm->bdev = bdev;
- ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->caching_state = tt_cached;
+ ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
ttm->page_flags = page_flags;
- ttm->dummy_read_page = dummy_read_page;
- ttm->state = tt_unpopulated;
+ ttm->dma_address = NULL;
ttm->swap_storage = NULL;
+ ttm->sg = bo->sg;
+ ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages)
+{
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
- ttm_tt_destroy(ttm);
+ if (ttm_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -207,82 +183,46 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_fini);
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- ttm->bdev = bdev;
- ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->caching_state = tt_cached;
- ttm->page_flags = page_flags;
- ttm->dummy_read_page = dummy_read_page;
- ttm->state = tt_unpopulated;
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
ttm->swap_storage = NULL;
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages || !ttm_dma->dma_address) {
- ttm_tt_destroy(ttm);
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
}
- return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- drm_free_large(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm->dma_address);
ttm->pages = NULL;
- drm_free_large(ttm_dma->dma_address);
- ttm_dma->dma_address = NULL;
+ ttm->dma_address = NULL;
}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
+EXPORT_SYMBOL(ttm_tt_fini);
-void ttm_tt_unbind(struct ttm_tt *ttm)
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
int ret;
- if (ttm->state == tt_bound) {
- ret = ttm->func->unbind(ttm);
- BUG_ON(ret);
- ttm->state = tt_unbound;
- }
-}
-
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
-{
- int ret = 0;
-
- if (!ttm)
- return -EINVAL;
-
- if (ttm->state == tt_bound)
- return 0;
-
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
- return ret;
-
- ret = ttm->func->bind(ttm, bo_mem);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->state = tt_bound;
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
+ ret = ttm_sg_tt_alloc_page_directory(ttm);
+ else
+ ret = ttm_dma_tt_alloc_page_directory(ttm);
+ if (ret) {
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
return 0;
}
-EXPORT_SYMBOL(ttm_tt_bind);
+EXPORT_SYMBOL(ttm_sg_tt_init);
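+
+/*
+ * For sg BOs only the DMA address array is allocated; a driver creating a
+ * ttm_tt for an imported dma-buf would typically do (a sketch):
+ *
+ *	ret = ttm_sg_tt_init(tt, bo, TTM_TT_FLAG_EXTERNAL, ttm_cached);
+ */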
int ttm_tt_swapin(struct ttm_tt *ttm)
{
@@ -290,88 +230,337 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
+ gfp_t gfp_mask;
+ int i, ret;
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
for (i = 0; i < ttm->num_pages; ++i) {
- from_page = shmem_read_mapping_page(swap_space, i);
+ from_page = shmem_read_mapping_page_gfp(swap_space, i,
+ gfp_mask);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
}
to_page = ttm->pages[i];
- if (unlikely(to_page == NULL))
+ if (unlikely(to_page == NULL)) {
+ ret = -ENOMEM;
goto out_err;
+ }
copy_highpage(to_page, from_page);
- page_cache_release(from_page);
+ put_page(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
- fput(swap_storage);
+ fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
+
out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * failure.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
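+
+/*
+ * Backup and restore are meant to be paired by the driver's shrink and
+ * fault paths respectively; a sketch (the ttm_backup_flags field name is
+ * an assumption):
+ *
+ *	struct ttm_backup_flags flags = { .writeback = true };
+ *
+ *	ret = ttm_tt_backup(bdev, tt, flags);	(shrink time)
+ *	...
+ *	ret = ttm_tt_restore(bdev, tt, &ctx);	(next use)
+ */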
+
+/**
+ * ttm_tt_swapout - swap out tt object
+ *
+ * @bdev: TTM device structure.
+ * @ttm: The struct ttm_tt.
+ * @gfp_flags: Flags to use for memory allocation.
+ *
+ * Swap out a TT object to a shmem file.
+ *
+ * Return: The number of pages swapped out, or a negative error code.
+ */
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags)
+{
+ loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
-
- BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
- BUG_ON(ttm->caching_state != tt_cached);
-
- if (!persistent_swap_storage) {
- swap_storage = shmem_file_setup("ttm swap",
- ttm->num_pages << PAGE_SHIFT,
- 0);
- if (unlikely(IS_ERR(swap_storage))) {
- pr_err("Failed allocating swap storage\n");
- return PTR_ERR(swap_storage);
- }
- } else
- swap_storage = persistent_swap_storage;
+ int i, ret;
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_storage = shmem_file_setup("ttm swap", size, 0);
+ if (IS_ERR(swap_storage)) {
+ pr_err("Failed allocating swap storage\n");
+ return PTR_ERR(swap_storage);
+ }
+
+ swap_space = swap_storage->f_mapping;
+ gfp_flags &= mapping_gfp_mask(swap_space);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = shmem_read_mapping_page(swap_space, i);
- if (unlikely(IS_ERR(to_page))) {
+
+ to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
+ if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
}
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
- page_cache_release(to_page);
+ put_page(to_page);
}
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- if (persistent_swap_storage)
- ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
+
+ return ttm->num_pages;
- return 0;
out_err:
- if (!persistent_swap_storage)
- fput(swap_storage);
+ fput(swap_storage);
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
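+
+/*
+ * Callers treat a positive return value as the number of pages released;
+ * a sketch:
+ *
+ *	ret = ttm_tt_swapout(bdev, ttm, GFP_KERNEL);
+ *	if (ret > 0)
+ *		swapped += ret;
+ */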
+
+int ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ if (!ttm)
+ return -EINVAL;
+
+ if (ttm_tt_is_populated(ttm))
+ return 0;
+
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_add(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
+ }
+
+ while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
+ atomic_long_read(&ttm_dma32_pages_allocated) >
+ ttm_dma32_pages_limit) {
+
+ ret = ttm_global_swapout(ctx, GFP_KERNEL);
+ if (ret == 0)
+ break;
+ if (ret < 0)
+ goto error;
+ }
+
+ if (bdev->funcs->ttm_tt_populate)
+ ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
+ else
+ ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (ret)
+ goto error;
+
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_tt_unpopulate(bdev, ttm);
+ return ret;
+ }
+ }
+
+ return 0;
+
+error:
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_sub(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
+ }
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
+EXPORT_SYMBOL(ttm_tt_populate);
+#endif
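+
+/*
+ * A typical populate call before binding, using the two-bool operation
+ * context seen elsewhere in TTM; a sketch:
+ *
+ *	struct ttm_operation_ctx ctx = { .interruptible = true,
+ *					 .no_wait_gpu = false };
+ *
+ *	ret = ttm_tt_populate(bdev, bo->ttm, &ctx);
+ */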
+
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ if (!ttm_tt_is_populated(ttm))
+ return;
+
+ if (bdev->funcs->ttm_tt_unpopulate)
+ bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
+ else
+ ttm_pool_free(&bdev->pool, ttm);
+
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_sub(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
+ }
+
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Test the shrinker functions and dump the result */
+static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
+
+#endif
+
+/*
+ * ttm_tt_mgr_init - initialize the global page limits
+ *
+ * Set up the tt_shrink debugfs file and the default page limits used
+ * when swapping out BOs.
+ */
+void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_tt_debugfs_shrink_fops);
+#endif
+
+ if (!ttm_pages_limit)
+ ttm_pages_limit = num_pages;
+
+ if (!ttm_dma32_pages_limit)
+ ttm_dma32_pages_limit = num_dma32_pages;
+}
+
+static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_tt *iter_tt =
+ container_of(iter, typeof(*iter_tt), base);
+
+ iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+ iter_tt->prot));
+}
+
+static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *map)
+{
+ kunmap_local(map->vaddr);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
+ .map_local = ttm_kmap_iter_tt_map_local,
+ .unmap_local = ttm_kmap_iter_tt_unmap_local,
+ .maps_tt = true,
+};
+
+/**
+ * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
+ * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
+ * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt)
+{
+ iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
+ iter_tt->tt = tt;
+ if (tt)
+ iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
+ else
+ iter_tt->prot = PAGE_KERNEL;
+
+ return &iter_tt->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
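+
+/*
+ * Typically used as the TT-side counterpart of the iomap iterator in
+ * ttm_resource.c for a bounce move; a sketch:
+ *
+ *	struct ttm_kmap_iter_tt tt_it;
+ *	struct ttm_kmap_iter *src;
+ *
+ *	src = ttm_kmap_iter_tt_init(&tt_it, bo->ttm);
+ */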
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct file *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
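+
+/*
+ * Typically called once at BO creation for objects whose ttm_tt has
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE set and that should be shrinkable; a
+ * sketch:
+ *
+ *	ret = ttm_tt_setup_backup(tt);
+ *	if (ret)
+ *		return ret;
+ */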