Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	1453
1 file changed, 887 insertions(+), 566 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 895d77d799e4..2ff35d55e462 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,431 +29,175 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/swap.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/reservation.h>
+
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include <drm/drm_cache.h>
+
+#include "ttm_bo_internal.h"
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
};
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
- ttm_bo_mem_put(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
- int ret;
-
- if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- pr_err("Failed to expire sync object before unbinding TTM\n");
- return ret;
- }
-
- ttm_tt_unbind(ttm);
- ttm_bo_free_old_node(bo);
- ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
- TTM_PL_MASK_MEM);
- old_mem->mem_type = TTM_PL_SYSTEM;
- }
-
- ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
- if (unlikely(ret != 0))
- return ret;
-
- if (new_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(ttm, new_mem, ctx);
- if (unlikely(ret != 0))
- return ret;
- }
-
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
- if (likely(man->io_reserve_fastpath))
+ if (mem->bus.offset || mem->bus.addr)
return 0;
- if (interruptible)
- return mutex_lock_interruptible(&man->io_reserve_mutex);
+ mem->bus.is_iomem = false;
+ if (!bdev->funcs->io_mem_reserve)
+ return 0;
- mutex_lock(&man->io_reserve_mutex);
- return 0;
+ return bdev->funcs->io_mem_reserve(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
- if (likely(man->io_reserve_fastpath))
+ if (!mem)
return;
- mutex_unlock(&man->io_reserve_mutex);
-}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
- struct ttm_buffer_object *bo;
-
- if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
- return -EAGAIN;
-
- bo = list_first_entry(&man->io_reserve_lru,
- struct ttm_buffer_object,
- io_reserve_lru);
- list_del_init(&bo->io_reserve_lru);
- ttm_bo_unmap_virtual_locked(bo);
-
- return 0;
-}
-
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret = 0;
-
- if (!bdev->driver->io_mem_reserve)
- return 0;
- if (likely(man->io_reserve_fastpath))
- return bdev->driver->io_mem_reserve(bdev, mem);
-
- if (bdev->driver->io_mem_reserve &&
- mem->bus.io_reserved_count++ == 0) {
-retry:
- ret = bdev->driver->io_mem_reserve(bdev, mem);
- if (ret == -EAGAIN) {
- ret = ttm_mem_io_evict(man);
- if (ret == 0)
- goto retry;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
-
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- if (likely(man->io_reserve_fastpath))
+ if (!mem->bus.offset && !mem->bus.addr)
return;
- if (bdev->driver->io_mem_reserve &&
- --mem->bus.io_reserved_count == 0 &&
- bdev->driver->io_mem_free)
- bdev->driver->io_mem_free(bdev, mem);
-
-}
-EXPORT_SYMBOL(ttm_mem_io_free);
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- int ret;
-
- if (!mem->bus.io_reserved_vm) {
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[mem->mem_type];
-
- ret = ttm_mem_io_reserve(bo->bdev, mem);
- if (unlikely(ret != 0))
- return ret;
- mem->bus.io_reserved_vm = true;
- if (man->use_io_reserve_lru)
- list_add_tail(&bo->io_reserve_lru,
- &man->io_reserve_lru);
- }
- return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
+ if (bdev->funcs->io_mem_free)
+ bdev->funcs->io_mem_free(bdev, mem);
- if (mem->bus.io_reserved_vm) {
- mem->bus.io_reserved_vm = false;
- list_del_init(&bo->io_reserve_lru);
- ttm_mem_io_free(bo->bdev, mem);
- }
+ mem->bus.offset = 0;
+ mem->bus.addr = NULL;
}
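
A minimal usage sketch (not part of this patch) of how a caller might bracket access to a resource's I/O space with the reserve/free pair above; example_touch_io() is a hypothetical helper and assumes the bo reservation is held:

static int example_touch_io(struct ttm_buffer_object *bo)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	/* While reserved, mem->bus.offset or mem->bus.addr is valid. */
	if (mem->bus.is_iomem) {
		/* ioremap mem->bus.offset, or use mem->bus.addr directly. */
	}

	ttm_mem_io_free(bo->bdev, mem);
	return 0;
}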
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void **virtual)
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @clear: Whether to clear rather than copy.
+ * @num_pages: Number of pages of the operation.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to be able to run asynchronously under a
+ * dma-fence if desired.
+ */
+void ttm_move_memcpy(bool clear,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret;
- void *addr;
+ const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+ const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+ struct iosys_map src_map, dst_map;
+ pgoff_t i;
- *virtual = NULL;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bdev, mem);
- ttm_mem_io_unlock(man);
- if (ret || !mem->bus.is_iomem)
- return ret;
+ /* Single TTM move. NOP */
+ if (dst_ops->maps_tt && src_ops->maps_tt)
+ return;
- if (mem->bus.addr) {
- addr = mem->bus.addr;
- } else {
- if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
- else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
- if (!addr) {
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
- return -ENOMEM;
+ /* Don't move nonexistent data. Clear destination instead. */
+ if (clear) {
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ if (dst_map.is_iomem)
+ memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+ else
+ memset(dst_map.vaddr, 0, PAGE_SIZE);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
}
+ return;
}
- *virtual = addr;
- return 0;
-}
-
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
- void *virtual)
-{
- struct ttm_mem_type_manager *man;
-
- man = &bdev->man[mem->mem_type];
-
- if (virtual && mem->bus.addr == NULL)
- iounmap(virtual);
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
- uint32_t *dstP =
- (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
- uint32_t *srcP =
- (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
- int i;
- for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
- iowrite32(ioread32(srcP++), dstP++);
- return 0;
-}
-
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ src_ops->map_local(src_iter, &src_map, i);
+ drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
+ if (src_ops->unmap_local)
+ src_ops->unmap_local(src_iter, &src_map);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
+ }
}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
+EXPORT_SYMBOL(ttm_move_memcpy);
/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
+ * ttm_bo_move_memcpy
*
- * @addr: The virtual address from the map.
- * @prot: The page protection.
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ctx: operation context
+ * @dst_mem: struct ttm_resource indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful, free any old aperture space and
+ * assign @dst_mem to the buffer object. If unsuccessful, the old data
+ * remains untouched, and it's up to the caller to free the memory space
+ * indicated by @dst_mem.
+ * Returns:
+ * !0: Failure.
*/
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *d = ttm->pages[page];
- void *dst;
-
- if (!d)
- return -ENOMEM;
-
- src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
- if (!dst)
- return -ENOMEM;
-
- memcpy_fromio(dst, src, PAGE_SIZE);
-
- ttm_kunmap_atomic_prot(dst, prot);
-
- return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *s = ttm->pages[page];
- void *src;
-
- if (!s)
- return -ENOMEM;
-
- dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
- if (!src)
- return -ENOMEM;
-
- memcpy_toio(dst, src, PAGE_SIZE);
-
- ttm_kunmap_atomic_prot(src, prot);
-
- return 0;
-}
-
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *dst_mem)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy = *old_mem;
- void *old_iomap;
- void *new_iomap;
- int ret;
- unsigned long i;
- unsigned long page;
- unsigned long add = 0;
- int dir;
-
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret)
- return ret;
-
- ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
- if (ret)
- return ret;
- ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
- if (ret)
- goto out;
-
- /*
- * Single TTM move. NOP.
- */
- if (old_iomap == NULL && new_iomap == NULL)
- goto out2;
+ struct ttm_resource *src_mem = bo->resource;
+ struct ttm_resource_manager *src_man;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_linear_io io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+ bool clear;
+ int ret = 0;
- /*
- * Don't move nonexistent data. Clear destination instead.
- */
- if (old_iomap == NULL &&
- (ttm == NULL || (ttm->state == tt_unpopulated &&
- !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
- memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
- goto out2;
- }
+ if (WARN_ON(!src_mem))
+ return -EINVAL;
- /*
- * TTM might be null for moves within the same region.
- */
- if (ttm) {
- ret = ttm_tt_populate(ttm, ctx);
+ src_man = ttm_manager_type(bdev, src_mem->mem_type);
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
+ dst_man->use_tt)) {
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
- goto out1;
+ return ret;
}
- add = 0;
- dir = 1;
-
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->start < old_mem->start + old_mem->size)) {
- dir = -1;
- add = new_mem->num_pages - 1;
+ dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+ if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+ dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+ if (IS_ERR(dst_iter))
+ return PTR_ERR(dst_iter);
+
+ src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+ if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+ src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+ if (IS_ERR(src_iter)) {
+ ret = PTR_ERR(src_iter);
+ goto out_src_iter;
}
- for (i = 0; i < new_mem->num_pages; ++i) {
- page = i * dir + add;
- if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(old_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
- prot);
- } else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(new_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
- prot);
- } else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- }
- if (ret)
- goto out1;
- }
- mb();
-out2:
- old_copy = *old_mem;
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-
- if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(ttm);
- bo->ttm = NULL;
- }
+ clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
-out1:
- ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
-out:
- ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ if (!src_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+
+out_src_iter:
+ if (!dst_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
- /*
- * On error, keep the mm node!
- */
- if (!ret)
- ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
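
As a hedged sketch of how a driver's move callback might fall back to this helper; example_bo_move() and example_hw_copy() are hypothetical, and the callback signature is assumed to match struct ttm_device_funcs::move:

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_resource *new_mem,
			   struct ttm_place *hop)
{
	int ret;

	/* Try the hardware copy engine first (driver specific). */
	ret = example_hw_copy(bo, ctx, new_mem);
	if (ret != -ENODEV)
		return ret;

	/* No engine available: fall back to the CPU memcpy path. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}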
@@ -463,6 +207,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
+ dma_resv_fini(&fbo->base.base._resv);
ttm_bo_put(fbo->bo);
kfree(fbo);
}
@@ -493,62 +238,76 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return -ENOMEM;
fbo->base = *bo;
- fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
-
- ttm_bo_get(bo);
- fbo->bo = bo;
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
- INIT_LIST_HEAD(&fbo->base.ddestroy);
- INIT_LIST_HEAD(&fbo->base.lru);
- INIT_LIST_HEAD(&fbo->base.swap);
- INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
- mutex_init(&fbo->base.wu_mutex);
- fbo->base.moving = NULL;
- drm_vma_node_reset(&fbo->base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
-
- kref_init(&fbo->base.list_kref);
+ atomic_inc(&ttm_glob.bo_count);
+ drm_vma_node_reset(&fbo->base.base.vma_node);
+
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
- fbo->base.acc_size = 0;
- fbo->base.resv = &fbo->base.ttm_resv;
- reservation_object_init(fbo->base.resv);
- ret = reservation_object_trylock(fbo->base.resv);
+ fbo->base.pin_count = 0;
+ if (bo->type != ttm_bo_type_sg)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ fbo->base.base.dev = NULL;
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
+ if (ret) {
+ dma_resv_unlock(&fbo->base.base._resv);
+ kfree(fbo);
+ return ret;
+ }
+
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
+ ttm_bo_get(bo);
+ fbo->bo = bo;
+
+ ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
*new_obj = &fbo->base;
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+/**
+ * ttm_io_prot
+ *
+ * @bo: ttm buffer object
+ * @res: ttm resource object
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @bo and @res.
+ */
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp)
{
- /* Cached mappings need no adjustment */
- if (caching_flags & TTM_PL_FLAG_CACHED)
- return tmp;
+ struct ttm_resource_manager *man;
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
+ }
-#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else if (boot_cpu_data.x86 > 3)
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__) || defined(__mips__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
+ return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
@@ -557,19 +316,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
- if (bo->mem.bus.addr) {
+ if (bo->resource->bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
} else {
+ resource_size_t res = bo->resource->bus.offset + offset;
+
map->bo_kmap_type = ttm_bo_map_iomap;
- if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ if (mem->bus.caching == ttm_write_combined)
+ map->virtual = ioremap_wc(res, size);
+#ifdef CONFIG_X86
+ else if (mem->bus.caching == ttm_cached)
+ map->virtual = ioremap_cache(res, size);
+#endif
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap(res, size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -579,22 +342,25 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
pgprot_t prot;
int ret;
BUG_ON(!ttm);
- ret = ttm_tt_populate(ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -608,7 +374,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -616,28 +382,66 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return (!map->virtual) ? -ENOMEM : 0;
}
+/**
+ * ttm_bo_kmap_try_from_panic
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This should only be called from the panic handler, and only once you have
+ * made sure the bo is the one being displayed, so that it is properly
+ * allocated and protected.
+ *
+ * Returns the vaddr, which you can use to write to the bo, and which you
+ * should pass to kunmap_local() when you're done with this page, or NULL
+ * if the bo is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
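
A hypothetical panic-path sketch of the helper above; example_panic_poke() is not part of the patch and assumes the caller already verified the bo is the scanout buffer:

static void example_panic_poke(struct ttm_buffer_object *bo, unsigned long page)
{
	u32 *vaddr = ttm_bo_kmap_try_from_panic(bo, page);

	if (!vaddr)
		return;	/* bo is in iomem, or the page is missing */

	vaddr[0] = 0xffffffff;	/* scribble into the mapped page */
	kunmap_local(vaddr);
}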
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->num_pages)
+ if (num_pages > PFN_UP(bo->resource->size))
return -EINVAL;
- if (start_page > bo->num_pages)
+ if ((start_page + num_pages) > PFN_UP(bo->resource->size))
return -EINVAL;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
- ttm_mem_io_unlock(man);
+ ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
if (ret)
return ret;
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
@@ -647,12 +451,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_kmap);
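
A short usage sketch for ttm_bo_kmap()/ttm_bo_kunmap(), assuming the reservation is held; example_read_first_page() is hypothetical:

static int example_read_first_page(struct ttm_buffer_object *bo, void *out)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_fromio(out, (void __iomem *)virtual, PAGE_SIZE);
	else
		memcpy(out, virtual, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}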
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
- struct ttm_buffer_object *bo = map->bo;
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
-
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
@@ -670,188 +477,702 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
- ttm_mem_io_unlock(man);
+ ttm_mem_io_free(map->bo->bdev, map->bo->resource);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence,
- bool evict,
- struct ttm_mem_reg *new_mem)
+/**
+ * ttm_bo_vmap
+ *
+ * @bo: The buffer object.
+ * @map: pointer to a struct iosys_map representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap or vmap to the
+ * data in the buffer object. The parameter @map returns the virtual
+ * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
int ret;
- struct ttm_buffer_object *ghost_obj;
- reservation_object_add_excl_fence(bo->resv, fence);
- if (evict) {
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
- if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- ttm_bo_free_old_node(bo);
- } else {
- /**
- * This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
- */
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
+ if (ret)
+ return ret;
+
+ if (mem->bus.is_iomem) {
+ void __iomem *vaddr_iomem;
+
+ if (mem->bus.addr)
+ vaddr_iomem = (void __iomem *)mem->bus.addr;
+ else if (mem->bus.caching == ttm_write_combined)
+ vaddr_iomem = ioremap_wc(mem->bus.offset,
+ bo->base.size);
+#ifdef CONFIG_X86
+ else if (mem->bus.caching == ttm_cached)
+ vaddr_iomem = ioremap_cache(mem->bus.offset,
+ bo->base.size);
+#endif
+ else
+ vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
+
+ if (!vaddr_iomem)
+ return -ENOMEM;
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
+ iosys_map_set_vaddr_iomem(map, vaddr_iomem);
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ } else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
+ void *vaddr;
+
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
- /**
- * If we're not moving to fixed memory, the TTM object
- * needs to stay alive. Otherwhise hang it on the ghost
- * bo to be unbound and destroyed.
+ /*
+ * We need to use vmap to get the desired page protection
+ * or to make the buffer object look contiguous.
*/
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+ vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
+ if (!vaddr)
+ return -ENOMEM;
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ghost_obj->ttm = NULL;
- else
- bo->ttm = NULL;
-
- ttm_bo_unreserve(ghost_obj);
- ttm_bo_put(ghost_obj);
+ iosys_map_set_vaddr(map, vaddr);
}
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-
return 0;
}
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+EXPORT_SYMBOL(ttm_bo_vmap);
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem)
+/**
+ * ttm_bo_vunmap
+ *
+ * @bo: The buffer object.
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_vmap().
+ */
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
- struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
- struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+ dma_resv_assert_held(bo->base.resv);
- int ret;
+ if (iosys_map_is_null(map))
+ return;
- reservation_object_add_excl_fence(bo->resv, fence);
+ if (!map->is_iomem)
+ vunmap(map->vaddr);
+ else if (!mem->bus.addr)
+ iounmap(map->vaddr_iomem);
+ iosys_map_clear(map);
- if (!evict) {
- struct ttm_buffer_object *ghost_obj;
+ ttm_mem_io_free(bo->bdev, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_vunmap);
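
A minimal vmap/vunmap sketch; example_clear_bo() is hypothetical and takes the reservation itself:

static int example_clear_bo(struct ttm_buffer_object *bo)
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock(bo->base.resv, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_vmap(bo, &map);
	if (!ret) {
		/* iosys_map_memset() handles both iomem and system maps. */
		iosys_map_memset(&map, 0, 0, bo->base.size);
		ttm_bo_vunmap(bo, &map);
	}

	dma_resv_unlock(bo->base.resv);
	return ret;
}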
- /**
- * This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
- */
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+ bool dst_use_tt)
+{
+ long ret;
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ if (ret == 0)
+ return -EBUSY;
+ if (ret < 0)
+ return ret;
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
- if (ret)
- return ret;
+ if (!dst_use_tt)
+ ttm_bo_tt_destroy(bo);
+ ttm_resource_free(bo, &bo->resource);
+ return 0;
+}
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool dst_use_tt)
+{
+ struct ttm_buffer_object *ghost_obj;
+ int ret;
- /**
- * If we're not moving to fixed memory, the TTM object
- * needs to stay alive. Otherwhise hang it on the ghost
- * bo to be unbound and destroyed.
- */
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
- if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
- ghost_obj->ttm = NULL;
- else
- bo->ttm = NULL;
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
- ttm_bo_unreserve(ghost_obj);
- ttm_bo_put(ghost_obj);
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_KERNEL);
- } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
- /**
- * BO doesn't have a TTM we need to bind/unbind. Just remember
- * this eviction and free up the allocation
- */
+ if (dst_use_tt)
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
- spin_lock(&from->move_lock);
- if (!from->move || dma_fence_is_later(fence, from->move)) {
- dma_fence_put(from->move);
- from->move = dma_fence_get(fence);
- }
- spin_unlock(&from->move_lock);
+ dma_resv_unlock(&ghost_obj->base._resv);
+ ttm_bo_put(ghost_obj);
+ return 0;
+}
- ttm_bo_free_old_node(bo);
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+ struct dma_fence *fence)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from;
+ struct dma_fence *tmp;
+ int i;
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
+ from = ttm_manager_type(bdev, bo->resource->mem_type);
+ /**
+ * BO doesn't have a TTM we need to bind/unbind. Just remember
+ * this eviction and free up the allocation.
+ * The fence will be saved in the first free slot or in the slot
+ * already used to store a fence from the same context. Since
+ * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for
+ * evictions we should always find a slot to use.
+ */
+ spin_lock(&from->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ tmp = from->eviction_fences[i];
+ if (!tmp)
+ break;
+ if (fence->context != tmp->context)
+ continue;
+ if (dma_fence_is_later(fence, tmp)) {
+ dma_fence_put(tmp);
+ break;
+ }
+ goto unlock;
+ }
+ if (i < TTM_NUM_MOVE_FENCES) {
+ from->eviction_fences[i] = dma_fence_get(fence);
} else {
- /**
- * Last resort, wait for the move to be completed.
- *
- * Should never happen in pratice.
- */
+ WARN(1, "not enough fence slots for all fence contexts");
+ spin_unlock(&from->eviction_lock);
+ dma_fence_wait(fence, false);
+ goto end;
+ }
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
- return ret;
+unlock:
+ spin_unlock(&from->eviction_lock);
+end:
+ ttm_resource_free(bo, &bo->resource);
+}
- if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- ttm_bo_free_old_node(bo);
- }
+/**
+ * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret = 0;
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
+ if (!evict)
+ ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+ else if (!from->use_tt && pipeline)
+ ttm_bo_move_pipeline_evict(bo, fence);
+ else
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+
+ if (ret)
+ return ret;
+
+ ttm_bo_assign_mem(bo, new_mem);
return 0;
}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
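
As a sketch of the intended calling pattern; example_schedule_copy(), which returns the copy engine's fence, is hypothetical:

static int example_accel_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	/* Kick off the blit on the copy engine (driver specific). */
	fence = example_schedule_copy(bo, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Hand the fence to TTM; pipeline evictions where possible. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}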
+
+/**
+ * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret;
+
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+ if (WARN_ON(ret))
+ return;
+
+ ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptibly
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
struct ttm_buffer_object *ghost;
+ struct ttm_tt *ttm;
int ret;
- ret = ttm_buffer_object_transfer(bo, &ghost);
+ /* If already idle, no need for ghost object dance. */
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ if (!bo->ttm) {
+ /* See comment below about clearing. */
+ ret = ttm_tt_create(bo, true);
+ if (ret)
+ return ret;
+ } else {
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ if (bo->type == ttm_bo_type_device)
+ ttm_tt_mark_for_clear(bo->ttm);
+ }
+ ttm_resource_free(bo, &bo->resource);
+ return 0;
+ }
+
+ /*
+ * We need an unpopulated ttm_tt after giving our current one,
+ * if any, to the ghost object. And we can't afford to fail
+ * creating one *after* the operation. If the bo subsequently gets
+ * resurrected, make sure it's cleared (if ttm_bo_type_device)
+ * to avoid leaking sensitive information to user-space.
+ */
+
+ ttm = bo->ttm;
+ bo->ttm = NULL;
+ ret = ttm_tt_create(bo, true);
+ swap(bo->ttm, ttm);
if (ret)
return ret;
- ret = reservation_object_copy_fences(ghost->resv, bo->resv);
- /* Last resort, wait for the BO to be idle when we are OOM */
+ ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
- ttm_bo_wait(bo, false, false);
+ goto error_destroy_tt;
- memset(&bo->mem, 0, sizeof(bo->mem));
- bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->ttm = NULL;
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
+ /* Last resort, wait for the BO to be idle when we are OOM */
+ if (ret) {
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ }
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
-
+ bo->ttm = ttm;
return 0;
+
+error_destroy_tt:
+ ttm_tt_destroy(bo->bdev, ttm);
+ return ret;
+}
+
+static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_operation_ctx *ctx = curs->arg->ctx;
+
+ curs->needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ curs->needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
+ else
+ ret = dma_resv_lock(resv, arg->ticket);
+
+ if (!ret) {
+ curs->needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+ * after waiting for the ticketlock, revert back to
+ * trylocking for this walk.
+ */
+ arg->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describes the walk and the actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource
+ * encountered, the corresponding ttm_buffer_object is locked and a
+ * reference is taken on it before the LRU lock is dropped. The LRU lock
+ * may be dropped before locking and, in that case, it's verified that
+ * the item actually remains on the LRU list after the lock, and that
+ * the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is done by the caller, it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that the way dma_resv individualization is done, locking needs to be done
+ * either with the LRU lock held (trylocking only) or with a reference on the
+ * object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_bo_lru_cursor cursor;
+ struct ttm_buffer_object *bo;
+ s64 progress = 0;
+ s64 lret;
+
+ ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) {
+ lret = walk->ops->process_bo(walk, bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return progress;
+}
+EXPORT_SYMBOL(ttm_lru_walk_for_evict);
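
A sketch of a walk built on this helper; the example_* names are hypothetical, and struct ttm_lru_walk is assumed to embed the ops and arg used above:

static s64 example_process_bo(struct ttm_lru_walk *walk,
			      struct ttm_buffer_object *bo)
{
	/* Evict, swap or shrink @bo here; report pages of progress. */
	return PFN_UP(bo->base.size);
}

static const struct ttm_lru_walk_ops example_walk_ops = {
	.process_bo = example_process_bo,
};

static s64 example_evict_pages(struct ttm_device *bdev,
			       struct ttm_resource_manager *man,
			       struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &example_walk_ops,
		.arg = { .ctx = ctx },
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}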
+
+static void ttm_bo_lru_cursor_cleanup_bo(struct ttm_bo_lru_cursor *curs)
+{
+ struct ttm_buffer_object *bo = curs->bo;
+
+ if (bo) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+ curs->bo = NULL;
+ }
+}
+
+/**
+ * ttm_bo_lru_cursor_fini() - Stop using a struct ttm_bo_lru_cursor
+ * and clean up any iteration it was used for.
+ * @curs: The cursor.
+ */
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ spin_lock(lru_lock);
+ ttm_resource_cursor_fini(&curs->res_curs);
+ spin_unlock(lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_fini);
+
+/**
+ * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor
+ * @curs: The ttm_bo_lru_cursor to initialize.
+ * @man: The ttm resource_manager whose LRU lists to iterate over.
+ * @arg: The ttm_lru_walk_arg to govern the walk.
+ *
+ * Initialize a struct ttm_bo_lru_cursor.
+ *
+ * Return: Pointer to @curs. The function does not fail.
+ */
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg)
+{
+ memset(curs, 0, sizeof(*curs));
+ ttm_resource_cursor_init(&curs->res_curs, man);
+ curs->arg = arg;
+
+ return curs;
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_init);
+
+static struct ttm_buffer_object *
+__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock;
+ struct ttm_resource *res = NULL;
+ struct ttm_buffer_object *bo;
+ struct ttm_lru_walk_arg *arg = curs->arg;
+ bool first = !curs->bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+
+ spin_lock(lru_lock);
+ for (;;) {
+ int mem_type, ret = 0;
+ bool bo_locked = false;
+
+ if (first) {
+ res = ttm_resource_manager_first(&curs->res_curs);
+ first = false;
+ } else {
+ res = ttm_resource_manager_next(&curs->res_curs);
+ }
+ if (!res)
+ break;
+
+ bo = res->bo;
+ if (ttm_lru_walk_trylock(curs, bo))
+ bo_locked = true;
+ else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (curs->needs_unlock)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(lru_lock);
+ if (!bo_locked)
+ ret = ttm_lru_walk_ticketlock(curs, bo);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ curs->bo = bo;
+ if (!ret && bo->resource && bo->resource->mem_type == mem_type)
+ return bo;
+
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ if (ret && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ spin_lock(lru_lock);
+ }
+
+ spin_unlock(lru_lock);
+ return res ? bo : NULL;
+}
+
+/**
+ * ttm_bo_lru_cursor_next() - Continue iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init() and
+ * ttm_bo_lru_cursor_first().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
+{
+ return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_next);
+
+/**
+ * ttm_bo_lru_cursor_first() - Start iterating a manager's LRU lists
+ * to find and lock buffer object.
+ * @curs: The cursor initialized using ttm_bo_lru_cursor_init().
+ *
+ * Return: A pointer to a locked and reference-counted buffer object,
+ * or NULL if none could be found and looping should be terminated.
+ */
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs)
+{
+ ttm_bo_lru_cursor_cleanup_bo(curs);
+ return __ttm_bo_lru_cursor_next(curs);
+}
+EXPORT_SYMBOL(ttm_bo_lru_cursor_first);
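
Putting the cursor API together, a hedged iteration sketch; example_scan_lru() is hypothetical:

static void example_scan_lru(struct ttm_resource_manager *man,
			     struct ttm_lru_walk_arg *arg)
{
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;

	ttm_bo_lru_cursor_init(&curs, man, arg);
	for (bo = ttm_bo_lru_cursor_first(&curs); bo && !IS_ERR(bo);
	     bo = ttm_bo_lru_cursor_next(&curs)) {
		/* @bo is locked and referenced here. */
	}
	ttm_bo_lru_cursor_fini(&curs);
}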
+
+/**
+ * ttm_bo_shrink() - Helper to shrink a ttm buffer object.
+ * @ctx: The struct ttm_operation_ctx used for the shrinking operation.
+ * @bo: The buffer object.
+ * @flags: Flags governing the shrinking behaviour.
+ *
+ * The function uses the ttm_tt_backup() functionality to back up or
+ * purge a struct ttm_tt. If the bo is not in system, it's first
+ * moved there.
+ *
+ * Return: The number of pages shrunken or purged, or
+ * negative error code on failure.
+ */
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags)
+{
+ static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0,
+ };
+ static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ };
+ struct ttm_tt *tt = bo->ttm;
+ long lret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (flags.allow_move && bo->resource->mem_type != TTM_PL_SYSTEM) {
+ int ret = ttm_bo_validate(bo, &sys_placement, ctx);
+
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret) {
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
+ return ret;
+ }
+ }
+
+ ttm_bo_unmap_virtual(bo);
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret < 0)
+ return lret;
+
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ {.purge = flags.purge,
+ .writeback = flags.writeback});
+
+ if (lret <= 0 && bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ if (lret < 0 && lret != -EINTR)
+ return -EBUSY;
+
+ return lret;
+}
+EXPORT_SYMBOL(ttm_bo_shrink);
+
+/**
+ * ttm_bo_shrink_suitable() - Whether a bo is suitable for shrinking
+ * @ctx: The struct ttm_operation_ctx governing the shrinking.
+ * @bo: The candidate for shrinking.
+ *
+ * Check whether the object, given the information available to TTM,
+ * is suitable for shrinking. This function can and should be used
+ * before attempting to shrink an object.
+ *
+ * Return: true if suitable, false if not.
+ */
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return bo->ttm && ttm_tt_is_populated(bo->ttm) && !bo->pin_count &&
+ (!ctx->no_wait_gpu ||
+ dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP));
+}
+EXPORT_SYMBOL(ttm_bo_shrink_suitable);
+
+/**
+ * ttm_bo_shrink_avoid_wait() - Whether to avoid waiting for GPU
+ * during shrinking
+ *
+ * In some situations, like direct reclaim, waiting (in particular gpu waiting)
+ * should be avoided since it may stall a system that could otherwise make progress
+ * by shrinking something else that is less time-consuming.
+ *
+ * Return: true if gpu waiting should be avoided, false if not.
+ */
+bool ttm_bo_shrink_avoid_wait(void)
+{
+ return !current_is_kswapd();
}
+EXPORT_SYMBOL(ttm_bo_shrink_avoid_wait);
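
Finally, a sketch tying the three shrink helpers together; example_shrink_one() is hypothetical and assumes the caller holds bo->base.resv:

static long example_shrink_one(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	const struct ttm_bo_shrink_flags flags = {
		.purge = false,
		.writeback = true,
		.allow_move = true,
	};

	if (!ttm_bo_shrink_suitable(bo, &ctx))
		return 0;

	return ttm_bo_shrink(&ctx, bo, flags);
}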