Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_tt.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  652
1 file changed, 359 insertions(+), 293 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e3a0691582ff..611d20ab966d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,217 +31,149 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
-#include <linux/file.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
-/**
+#include "ttm_module.h"
+#include "ttm_pool_internal.h"
+
+static unsigned long ttm_pages_limit;
+
+MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
+module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);
+
+static unsigned long ttm_dma32_pages_limit;
+
+MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
+module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
+
+static atomic_long_t ttm_pages_allocated;
+static atomic_long_t ttm_dma32_pages_allocated;
+
+/*
* Allocates a ttm structure for the given BO.
*/
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
- reservation_object_assert_held(bo->resv);
-
- if (bdev->need_dma32)
- page_flags |= TTM_PAGE_FLAG_DMA32;
+ dma_resv_assert_held(bo->base.resv);
- if (bdev->no_retry)
- page_flags |= TTM_PAGE_FLAG_NO_RETRY;
+ if (bo->ttm)
+ return 0;
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
- bo->ttm = NULL;
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption the
+ * mapped TT pages need to be decrypted or otherwise the drivers
+ * will end up sending encrypted mem to the gpu.
+ */
+ if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info_once(ddev, "TT memory decryption enabled.");
+ }
- bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
+ bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
-/**
+/*
* Allocates storage for pointers to the pages that back the ttm.
*/
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
if (!ttm->pages)
return -ENOMEM;
+
return 0;
}
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
- sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
- if (!ttm->ttm.pages)
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address), GFP_KERNEL);
+ if (!ttm->pages)
return -ENOMEM;
- ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+ ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
return 0;
}
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
+ GFP_KERNEL);
if (!ttm->dma_address)
return -ENOMEM;
- return 0;
-}
-
-static int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- int ret = 0;
-
- if (PageHighMem(p))
- return 0;
-
- if (c_old != tt_cached) {
- /* p isn't in the default caching state, set it to
- * writeback first to free its current memtype. */
-
- ret = ttm_set_pages_wb(p, 1);
- if (ret)
- return ret;
- }
-
- if (c_new == tt_wc)
- ret = ttm_set_pages_wc(p, 1);
- else if (c_new == tt_uncached)
- ret = ttm_set_pages_uc(p, 1);
-
- return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
- enum ttm_caching_state c_state)
-{
- int i, j;
- struct page *cur_page;
- int ret;
-
- if (ttm->caching_state == c_state)
- return 0;
-
- if (ttm->state == tt_unpopulated) {
- /* Change caching but don't populate */
- ttm->caching_state = c_state;
- return 0;
- }
-
- if (ttm->caching_state == tt_cached)
- drm_clflush_pages(ttm->pages, ttm->num_pages);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages[i];
- if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page,
- ttm->caching_state,
- c_state);
- if (unlikely(ret != 0))
- goto out_err;
- }
- }
-
- ttm->caching_state = c_state;
return 0;
-
-out_err:
- for (j = 0; j < i; ++j) {
- cur_page = ttm->pages[j];
- if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page, c_state,
- ttm->caching_state);
- }
- }
-
- return ret;
}
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
- enum ttm_caching_state state;
-
- if (placement & TTM_PL_FLAG_WC)
- state = tt_wc;
- else if (placement & TTM_PL_FLAG_UNCACHED)
- state = tt_uncached;
- else
- state = tt_cached;
-
- return ttm_tt_set_caching(ttm, state);
-}
-EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-
-void ttm_tt_destroy(struct ttm_tt *ttm)
-{
- if (ttm == NULL)
- return;
-
- ttm_tt_unbind(ttm);
-
- if (ttm->state == tt_unbound)
- ttm_tt_unpopulate(ttm);
-
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
- ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
- ttm->func->destroy(ttm);
+ bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
-void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+static void ttm_tt_init_fields(struct ttm_tt *ttm,
+ struct ttm_buffer_object *bo,
+ uint32_t page_flags,
+ enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm->bdev = bo->bdev;
- ttm->num_pages = bo->num_pages;
- ttm->caching_state = tt_cached;
+ ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
ttm->page_flags = page_flags;
- ttm->state = tt_unpopulated;
+ ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
+ ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);
if (ttm_tt_alloc_page_directory(ttm)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -251,43 +183,40 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- kvfree(ttm->pages);
- ttm->pages = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_fini);
-
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
- ttm_tt_init_fields(ttm, bo, page_flags);
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
- ttm_tt_destroy(ttm);
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
}
- return 0;
+
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm->dma_address);
+ ttm->pages = NULL;
+ ttm->dma_address = NULL;
}
-EXPORT_SYMBOL(ttm_dma_tt_init);
+EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
int ret;
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- if (page_flags & TTM_PAGE_FLAG_SG)
- ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
+ ret = ttm_sg_tt_alloc_page_directory(ttm);
else
- ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_dma_tt_alloc_page_directory(ttm);
if (ret) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -295,133 +224,127 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_sg_tt_init);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- if (ttm->pages)
- kvfree(ttm->pages);
- else
- kvfree(ttm_dma->dma_address);
- ttm->pages = NULL;
- ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
- int ret;
-
- if (ttm->state == tt_bound) {
- ret = ttm->func->unbind(ttm);
- BUG_ON(ret);
- ttm->state = tt_unbound;
- }
-}
-
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx)
-{
- int ret = 0;
-
- if (!ttm)
- return -EINVAL;
-
- if (ttm->state == tt_bound)
- return 0;
-
- ret = ttm_tt_populate(ttm, ctx);
- if (ret)
- return ret;
-
- ret = ttm->func->bind(ttm, bo_mem);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->state = tt_bound;
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_tt_bind);
-
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
+ gfp_t gfp_mask;
+ int i, ret;
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
- from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
-
+ from_page = shmem_read_mapping_page_gfp(swap_space, i,
+ gfp_mask);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
}
to_page = ttm->pages[i];
- if (unlikely(to_page == NULL))
+ if (unlikely(to_page == NULL)) {
+ ret = -ENOMEM;
goto out_err;
+ }
copy_highpage(to_page, from_page);
put_page(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
- fput(swap_storage);
+ fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
+
out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
+
+/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_shrink_tt to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * error.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
+
+/**
+ * ttm_tt_swapout - swap out tt object
+ *
+ * @bdev: TTM device structure.
+ * @ttm: The struct ttm_tt.
+ * @gfp_flags: Flags to use for memory allocation.
+ *
+ * Swapout a TT object to a shmem_file, return number of pages swapped out or
+ * negative error code.
+ */
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags)
+{
+ loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
-
- BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
- BUG_ON(ttm->caching_state != tt_cached);
-
- if (!persistent_swap_storage) {
- swap_storage = shmem_file_setup("ttm swap",
- ttm->num_pages << PAGE_SHIFT,
- 0);
- if (IS_ERR(swap_storage)) {
- pr_err("Failed allocating swap storage\n");
- return PTR_ERR(swap_storage);
- }
- } else {
- swap_storage = persistent_swap_storage;
+ int i, ret;
+
+ swap_storage = shmem_file_setup("ttm swap", size, 0);
+ if (IS_ERR(swap_storage)) {
+ pr_err("Failed allocating swap storage\n");
+ return PTR_ERR(swap_storage);
}
swap_space = swap_storage->f_mapping;
+ gfp_flags &= mapping_gfp_mask(swap_space);
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
-
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+ to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
@@ -432,69 +355,212 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
put_page(to_page);
}
- ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- if (persistent_swap_storage)
- ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
+
+ return ttm->num_pages;
- return 0;
out_err:
- if (!persistent_swap_storage)
- fput(swap_storage);
+ fput(swap_storage);
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
-static void ttm_tt_add_mapping(struct ttm_tt *ttm)
+int ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- pgoff_t i;
+ int ret;
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
+ if (!ttm)
+ return -EINVAL;
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
-}
+ if (ttm_tt_is_populated(ttm))
+ return 0;
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
- int ret;
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_add(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
+ }
- if (ttm->state != tt_unpopulated)
- return 0;
+ while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
+ atomic_long_read(&ttm_dma32_pages_allocated) >
+ ttm_dma32_pages_limit) {
+
+ ret = ttm_global_swapout(ctx, GFP_KERNEL);
+ if (ret == 0)
+ break;
+ if (ret < 0)
+ goto error;
+ }
- if (ttm->bdev->driver->ttm_tt_populate)
- ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
+ if (bdev->funcs->ttm_tt_populate)
+ ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
else
- ret = ttm_pool_populate(ttm, ctx);
- if (!ret)
- ttm_tt_add_mapping(ttm);
+ ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (ret)
+ goto error;
+
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_tt_unpopulate(bdev, ttm);
+ return ret;
+ }
+ }
+
+ return 0;
+
+error:
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_sub(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
+ }
return ret;
}
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
+EXPORT_SYMBOL(ttm_tt_populate);
+#endif
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+ if (!ttm_tt_is_populated(ttm))
return;
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
+ if (bdev->funcs->ttm_tt_unpopulate)
+ bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
+ else
+ ttm_pool_free(&bdev->pool, ttm);
+
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+ if (ttm_pool_uses_dma32(&bdev->pool))
+ atomic_long_sub(ttm->num_pages,
+ &ttm_dma32_pages_allocated);
}
+
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
-void ttm_tt_unpopulate(struct ttm_tt *ttm)
+#ifdef CONFIG_DEBUG_FS
+
+/* Test the shrinker functions and dump the result */
+static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
- if (ttm->state == tt_unpopulated)
- return;
+ struct ttm_operation_ctx ctx = { false, false };
+
+ seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
+
+#endif
+
+
+/*
+ * ttm_tt_mgr_init - register with the MM shrinker
+ *
+ * Register with the MM shrinker for swapping out BOs.
+ */
+void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_tt_debugfs_shrink_fops);
+#endif
+
+ if (!ttm_pages_limit)
+ ttm_pages_limit = num_pages;
+
+ if (!ttm_dma32_pages_limit)
+ ttm_dma32_pages_limit = num_dma32_pages;
+}
+
+static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_tt *iter_tt =
+ container_of(iter, typeof(*iter_tt), base);
- ttm_tt_clear_mapping(ttm);
- if (ttm->bdev->driver->ttm_tt_unpopulate)
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+ iter_tt->prot));
+}
+
+static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
+ struct iosys_map *map)
+{
+ kunmap_local(map->vaddr);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
+ .map_local = ttm_kmap_iter_tt_map_local,
+ .unmap_local = ttm_kmap_iter_tt_unmap_local,
+ .maps_tt = true,
+};
+
+/**
+ * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
+ * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
+ * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt)
+{
+ iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
+ iter_tt->tt = tt;
+ if (tt)
+ iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
else
- ttm_pool_unpopulate(ttm);
+ iter_tt->prot = PAGE_KERNEL;
+
+ return &iter_tt->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct file *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
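
A minimal driver-side sketch (not part of the diff above) of how a backend's ttm_tt_create() hook might call the reworked ttm_tt_init(), which now takes an explicit ttm_caching mode and an extra_pages count. The my_ttm_tt_create() name and the choice of ttm_cached are illustrative assumptions only; the matching ttm_tt_destroy() hook would call ttm_tt_fini() and free the wrapper.

	/* Hypothetical backend hook; TTM treats a NULL return as -ENOMEM. */
	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
					       uint32_t page_flags)
	{
		struct ttm_tt *tt;

		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
		if (!tt)
			return NULL;

		/* Cached CPU mappings, no extra pages beyond PAGE_ALIGN(bo->base.size). */
		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
			kfree(tt);
			return NULL;
		}

		return tt;
	}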