Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_tt.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c  234
1 file changed, 160 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 24031a8acd2d..611d20ab966d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,14 +31,23 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
-#include <linux/file.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
+#include "ttm_pool_internal.h"
static unsigned long ttm_pages_limit;
@@ -59,6 +68,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -69,43 +79,55 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption, the
+ * mapped TT pages need to be decrypted; otherwise the drivers
+ * will end up sending encrypted memory to the GPU.
+ */
+ if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info_once(ddev, "TT memory decryption enabled.");
+ }
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
/*
* Allocates storage for pointers to the pages that back the ttm.
*/
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
if (!ttm->pages)
return -ENOMEM;
+
return 0;
}
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = kvmalloc_array(ttm->num_pages,
- sizeof(*ttm->pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address), GFP_KERNEL);
if (!ttm->pages)
return -ENOMEM;
@@ -115,48 +137,41 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->num_pages,
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
+ GFP_KERNEL);
if (!ttm->dma_address)
return -ENOMEM;
- return 0;
-}
-
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- ttm_tt_unpopulate(bdev, ttm);
- if (ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
+ return 0;
}
-EXPORT_SYMBOL(ttm_tt_destroy_common);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
uint32_t page_flags,
- enum ttm_caching caching)
+ enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
- ttm->caching = ttm_cached;
+ ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
ttm->caching = caching;
+ ttm->restore = NULL;
+ ttm->backup = NULL;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching)
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);
if (ttm_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
@@ -168,6 +183,19 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
+
+ if (ttm_tt_is_backed_up(ttm))
+ ttm_pool_drop_backed_up(ttm);
+ if (ttm->backup) {
+ ttm_backup_fini(ttm->backup);
+ ttm->backup = NULL;
+ }
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -182,9 +210,9 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
{
int ret;
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);
- if (page_flags & TTM_PAGE_FLAG_SG)
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
else
ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -230,13 +258,57 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
+
+/**
+ * ttm_tt_backup() - Helper to back up a struct ttm_tt.
+ * @bdev: The TTM device.
+ * @tt: The struct ttm_tt.
+ * @flags: Flags that govern the backup behaviour.
+ *
+ * Update the page accounting and call ttm_pool_backup() to free pages
+ * or back them up.
+ *
+ * Return: Number of pages freed or swapped out, or negative error code on
+ * failure.
+ */
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags)
+{
+ long ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(tt->backup)))
+ return 0;
+
+ ret = ttm_pool_backup(&bdev->pool, tt, &flags);
+ if (ret > 0) {
+ tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
+ tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
+ }
+
+ return ret;
+}
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx)
+{
+ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
+
+ if (ret)
+ return ret;
+
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_restore);
/**
* ttm_tt_swapout - swap out tt object
@@ -285,7 +357,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
return ttm->num_pages;
@@ -294,17 +366,7 @@ out_err:
return ret;
}
-
-static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- pgoff_t i;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = bdev->dev_mapping;
-}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -317,9 +379,9 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_add(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
@@ -342,9 +404,9 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ret)
goto error;
- ttm_tt_add_mapping(bdev, ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_tt_unpopulate(bdev, ttm);
@@ -355,50 +417,39 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;
error:
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
return ret;
}
-EXPORT_SYMBOL(ttm_tt_populate);
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
- }
-}
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
+EXPORT_SYMBOL(ttm_tt_populate);
+#endif
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
- ttm_tt_clear_mapping(ttm);
if (bdev->funcs->ttm_tt_unpopulate)
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
- ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
#ifdef CONFIG_DEBUG_FS
@@ -435,18 +486,18 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
}
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *dmap,
+ struct iosys_map *dmap,
pgoff_t i)
{
struct ttm_kmap_iter_tt *iter_tt =
container_of(iter, typeof(*iter_tt), base);
- dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
- iter_tt->prot));
+ iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+ iter_tt->prot));
}
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *map)
+ struct iosys_map *map)
{
kunmap_local(map->vaddr);
}
@@ -478,3 +529,38 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
+
+/**
+ * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
+ * @tt: The ttm_tt for which to allocate and assign a backup structure.
+ *
+ * Assign a backup structure to be used for tt backup. This should
+ * typically be done at bo creation, to avoid allocations at shrinking
+ * time.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ttm_tt_setup_backup(struct ttm_tt *tt)
+{
+ struct file *backup =
+ ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
+
+ if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
+ return -EINVAL;
+
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ if (tt->backup)
+ ttm_backup_fini(tt->backup);
+
+ tt->backup = backup;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_setup_backup);
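
For orientation only (not part of the patch): the hunks above add a backup/restore path next to the existing swap path. A minimal sketch of how a driver might wire up the new helpers follows; the "my_drv" names and callbacks are hypothetical, and struct ttm_backup_flags is left zero-initialized since its fields are not shown in this diff. Only ttm_tt_init(), ttm_tt_fini(), ttm_tt_setup_backup(), ttm_tt_backup(), ttm_tt_restore() and ttm_tt_is_backed_up() come from the code above.

/*
 * Illustrative sketch, assuming a driver-managed shrinker. Not the
 * canonical usage; see the in-tree users of this API for the real thing.
 */
static struct ttm_tt *my_drv_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}

	/*
	 * Allocate the shmem backup object up front, as the kerneldoc above
	 * recommends, rather than at shrink time. Note that
	 * ttm_tt_setup_backup() expects TTM_TT_FLAG_EXTERNAL_MAPPABLE to be
	 * set in page_flags.
	 */
	if (ttm_tt_setup_backup(tt)) {
		ttm_tt_fini(tt);
		kfree(tt);
		return NULL;
	}

	return tt;
}

/* Called from a hypothetical driver shrinker for an idle, unbound tt. */
static long my_drv_shrink_tt(struct ttm_device *bdev, struct ttm_tt *tt)
{
	struct ttm_backup_flags flags = {}; /* fields not shown in this diff */

	return ttm_tt_backup(bdev, tt, flags);
}

/* Before the BO is used again, bring any backed-up pages back. */
static int my_drv_swapin_tt(struct ttm_device *bdev, struct ttm_tt *tt)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };

	if (!ttm_tt_is_backed_up(tt))
		return 0;

	return ttm_tt_restore(bdev, tt, &ctx);
}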