Diffstat (limited to 'drivers/gpu/drm/i915/intel_region_ttm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_region_ttm.c | 234
1 file changed, 137 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 82a6727ede46..47a69aad5c3f 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -2,15 +2,17 @@
 /*
  * Copyright © 2021 Intel Corporation
  */
-#include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_range_manager.h>
 
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
+#include "i915_ttm_buddy_manager.h"
 
 #include "intel_region_ttm.h"
 
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
+
 /**
  * DOC: TTM support structure
  *
@@ -20,9 +22,6 @@
  * i915 GEM regions to TTM memory types and resource managers.
  */
 
-/* A Zero-initialized driver for now. We don't have a TTM backend yet. */
-static struct ttm_device_funcs i915_ttm_bo_driver;
-
 /**
  * intel_region_ttm_device_init - Initialize a TTM device
  * @dev_priv: Pointer to an i915 device private structure.
@@ -33,9 +32,9 @@
 int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *drm = &dev_priv->drm;
 
-	return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
 			       drm->dev, drm->anon_inode->i_mapping,
-			       drm->vma_offset_manager, false, false);
+			       drm->vma_offset_manager, 0);
 }
 
 /**
@@ -52,12 +51,16 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
  * driver-private types for now, reserving TTM_PL_VRAM for stolen
  * memory and TTM_PL_TT for GGTT use if decided to implement this.
  */
-static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+int intel_region_to_ttm_type(const struct intel_memory_region *mem)
 {
 	int type;
 
 	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
-		   mem->type != INTEL_MEMORY_MOCK);
+		   mem->type != INTEL_MEMORY_MOCK &&
+		   mem->type != INTEL_MEMORY_SYSTEM);
+
+	if (mem->type == INTEL_MEMORY_SYSTEM)
+		return TTM_PL_SYSTEM;
 
 	type = mem->instance + TTM_PL_PRIV;
 	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
@@ -65,72 +68,30 @@ static int intel_region_to_ttm_type(struct intel_memory_region *mem)
 	return type;
 }
 
-static struct ttm_resource *
-intel_region_ttm_node_reserve(struct intel_memory_region *mem,
-			      resource_size_t offset,
-			      resource_size_t size)
-{
-	struct ttm_resource_manager *man = mem->region_private;
-	struct ttm_place place = {};
-	struct ttm_buffer_object mock_bo = {};
-	struct ttm_resource *res;
-	int ret;
-
-	/*
-	 * Having to use a mock_bo is unfortunate but stems from some
-	 * drivers having private managers that insist to know what the
-	 * allocate memory is intended for, using it to send private
-	 * data to the manager. Also recently the bo has been used to send
-	 * alignment info to the manager. Assume that apart from the latter,
-	 * none of the managers we use will ever access the buffer object
-	 * members, hoping we can pass the alignment info in the
-	 * struct ttm_place in the future.
-	 */
-
-	place.fpfn = offset >> PAGE_SHIFT;
-	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
-	mock_bo.base.size = size;
-	ret = man->func->alloc(man, &mock_bo, &place, &res);
-	if (ret == -ENOSPC)
-		ret = -ENXIO;
-
-	return ret ? ERR_PTR(ret) : res;
-}
-
 /**
- * intel_region_ttm_node_free - Free a node allocated from a resource manager
- * @mem: The region the node was allocated from.
- * @node: The opaque node representing an allocation.
+ * intel_region_ttm_init - Initialize a memory region for TTM.
+ * @mem: The region to initialize.
+ *
+ * This function initializes a suitable TTM resource manager for the
+ * region, and if it's a LMEM region type, attaches it to the TTM
+ * device. MOCK regions are NOT attached to the TTM device, since we don't
+ * have one for the mock selftests.
+ *
+ * Return: 0 on success, negative error code on failure.
  */
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
-				struct ttm_resource *res)
-{
-	struct ttm_resource_manager *man = mem->region_private;
-
-	man->func->free(man, res);
-}
-
-static const struct intel_memory_region_private_ops priv_ops = {
-	.reserve = intel_region_ttm_node_reserve,
-	.free = intel_region_ttm_node_free,
-};
-
 int intel_region_ttm_init(struct intel_memory_region *mem)
 {
 	struct ttm_device *bdev = &mem->i915->bdev;
 	int mem_type = intel_region_to_ttm_type(mem);
 	int ret;
 
-	ret = ttm_range_man_init(bdev, mem_type, false,
-				 resource_size(&mem->region) >> PAGE_SHIFT);
+	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
+				      resource_size(&mem->region),
+				      resource_size(&mem->io),
+				      mem->min_page_size, PAGE_SIZE);
 	if (ret)
 		return ret;
 
-	mem->chunk_size = PAGE_SIZE;
-	mem->max_order =
-		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
-	mem->is_range_manager = true;
-	mem->priv_ops = &priv_ops;
 	mem->region_private = ttm_manager_type(bdev, mem_type);
 
 	return 0;
@@ -144,21 +105,53 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
  * memory region, and if it was registered with the TTM device,
  * removes that registration.
  */
-void intel_region_ttm_fini(struct intel_memory_region *mem)
+int intel_region_ttm_fini(struct intel_memory_region *mem)
 {
-	int ret;
+	struct ttm_resource_manager *man = mem->region_private;
+	int ret = -EBUSY;
+	int count;
 
-	ret = ttm_range_man_fini(&mem->i915->bdev,
-				 intel_region_to_ttm_type(mem));
+	/*
+	 * Put the region's move fences. This releases requests that
+	 * may hold on to contexts and vms that may hold on to buffer
+	 * objects placed in this region.
+	 */
+	if (man)
+		ttm_resource_manager_cleanup(man);
+
+	/* Flush objects from region. */
+	for (count = 0; count < 10; ++count) {
+		i915_gem_flush_free_objects(mem->i915);
+
+		mutex_lock(&mem->objects.lock);
+		if (list_empty(&mem->objects.list))
+			ret = 0;
+		mutex_unlock(&mem->objects.lock);
+		if (!ret)
+			break;
+
+		msleep(20);
+		drain_workqueue(mem->i915->bdev.wq);
+	}
+
+	/* If we leaked objects, don't free the region, causing a use-after-free */
+	if (ret || !man)
+		return ret;
+
+	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
+				      intel_region_to_ttm_type(mem));
 	GEM_WARN_ON(ret);
 	mem->region_private = NULL;
+
+	return ret;
 }
 
 /**
- * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
- * to an sg_table.
+ * intel_region_ttm_resource_to_rsgt -
+ * Convert an opaque TTM resource manager resource to a refcounted sg_table.
  * @mem: The memory region.
- * @node: The resource manager node obtained from the TTM resource manager.
+ * @res: The resource manager resource obtained from the TTM resource manager.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * The gem backends typically use sg-tables for operations on the underlying
  * io_memory. So provide a way for the backends to translate the
@@ -166,20 +159,29 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
  *
  * Return: A malloced sg_table on success, an error pointer on failure.
  */
-struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
-					     struct ttm_resource *res)
+struct i915_refct_sgt *
+intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
+				  struct ttm_resource *res,
+				  u32 page_alignment)
 {
-	struct ttm_range_mgr_node *range_node =
-		container_of(res, typeof(*range_node), base);
+	if (mem->is_range_manager) {
+		struct ttm_range_mgr_node *range_node =
+			to_ttm_range_mgr_node(res);
 
-	GEM_WARN_ON(!mem->is_range_manager);
-	return i915_sg_from_mm_node(&range_node->mm_nodes[0],
-				    mem->region.start);
+		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
+					      mem->region.start,
+					      page_alignment);
+	} else {
+		return i915_rsgt_from_buddy_resource(res, mem->region.start,
+						     page_alignment);
+	}
 }
 
+#ifdef CONFIG_DRM_I915_SELFTEST
 /**
- * intel_region_ttm_node_alloc - Allocate memory resources from a region
+ * intel_region_ttm_resource_alloc - Allocate memory resources from a region
  * @mem: The memory region.
+ * @offset: BO offset
  * @size: The requested size in bytes
  * @flags: Allocation flags
  *
@@ -187,15 +189,16 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
  * memory from standalone TTM range managers, without the TTM eviction
  * functionality. Don't use if you are not completely sure that's the
  * case. The returned opaque node can be converted to an sg_table using
- * intel_region_ttm_node_to_st(), and can be freed using
- * intel_region_ttm_node_free().
+ * intel_region_ttm_resource_to_rsgt(), and can be freed using
+ * intel_region_ttm_resource_free().
  *
  * Return: A valid pointer on success, an error pointer on failure.
  */
 struct ttm_resource *
-intel_region_ttm_node_alloc(struct intel_memory_region *mem,
-			    resource_size_t size,
-			    unsigned int flags)
+intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+				resource_size_t offset,
+				resource_size_t size,
+				unsigned int flags)
 {
 	struct ttm_resource_manager *man = mem->region_private;
 	struct ttm_place place = {};
@@ -203,24 +206,61 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem,
 	struct ttm_buffer_object mock_bo = {};
 	struct ttm_resource *res;
 	int ret;
 
-	/*
-	 * We ignore the flags for now since we're using the range
-	 * manager and contigous and min page size would be fulfilled
-	 * by default if size is min page size aligned.
-	 */
-	mock_bo.base.size = size;
-
-	if (mem->is_range_manager) {
-		if (size >= SZ_1G)
-			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
-		else if (size >= SZ_2M)
-			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
-		else if (size >= SZ_64K)
-			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
+	if (flags & I915_BO_ALLOC_CONTIGUOUS)
+		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
+	if (offset != I915_BO_INVALID_OFFSET) {
+		if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
+			ret = -E2BIG;
+			goto out;
+		}
+		place.fpfn = offset >> PAGE_SHIFT;
+		if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
+			ret = -E2BIG;
+			goto out;
+		}
+		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+	} else if (resource_size(&mem->io) &&
+		   resource_size(&mem->io) < mem->total) {
+		if (flags & I915_BO_ALLOC_GPU_ONLY) {
+			place.flags |= TTM_PL_FLAG_TOPDOWN;
+		} else {
+			place.fpfn = 0;
+			if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
+				ret = -E2BIG;
+				goto out;
+			}
+			place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
+		}
 	}
 
+	mock_bo.base.size = size;
+	mock_bo.bdev = &mem->i915->bdev;
+
 	ret = man->func->alloc(man, &mock_bo, &place, &res);
+
+out:
 	if (ret == -ENOSPC)
 		ret = -ENXIO;
+	if (!ret)
+		res->bo = NULL; /* Rather blow up than risk some use-after-free */
 	return ret ? ERR_PTR(ret) : res;
 }
+
+#endif
+
+/**
+ * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
+ * @mem: The region the resource was allocated from.
+ * @res: The opaque resource representing an allocation.
+ */
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+				    struct ttm_resource *res)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+	struct ttm_buffer_object mock_bo = {};
+
+	mock_bo.base.size = res->size;
+	mock_bo.bdev = &mem->i915->bdev;
+	res->bo = &mock_bo;
+
+	man->func->free(man, res);
+}
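
The selftest-only allocator above pairs with intel_region_ttm_resource_to_rsgt() and intel_region_ttm_resource_free(). A minimal sketch of that flow, not part of the patch: it assumes an already-initialized region `mem`, the i915_refct_sgt_put() helper from i915_scatterlist.h, and approximate header locations; error handling is abbreviated.

/*
 * Sketch: allocate from a region, translate to a refcounted sg_table,
 * then release. `mem` is assumed to come from intel_region_ttm_init()
 * setup code elsewhere.
 */
#include "intel_region_ttm.h"
#include "i915_scatterlist.h"
#include "gem/i915_gem_object_types.h" /* for the I915_BO_ALLOC_* flags */

static int example_alloc_and_map(struct intel_memory_region *mem)
{
	struct ttm_resource *res;
	struct i915_refct_sgt *rsgt;

	/* No fixed offset; ask the manager for physically contiguous pages. */
	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
					      SZ_2M, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(res))
		return PTR_ERR(res); /* -ENXIO if the region is exhausted */

	/* Each sg entry will be at least PAGE_SIZE aligned. */
	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
	if (IS_ERR(rsgt)) {
		intel_region_ttm_resource_free(mem, res);
		return PTR_ERR(rsgt);
	}

	/* ... operate on rsgt->table ... */

	i915_refct_sgt_put(rsgt);
	intel_region_ttm_resource_free(mem, res);
	return 0;
}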
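Since intel_region_ttm_fini() now returns an int, teardown paths are expected to check for the -EBUSY leak case instead of freeing the region unconditionally. A hedged sketch of that pattern; example_region_release() and the kfree() free path are hypothetical stand-ins, the real release path lives in the memory-region code.

/*
 * Sketch: honoring the new int return of intel_region_ttm_fini().
 */
static void example_region_release(struct intel_memory_region *mem)
{
	/*
	 * A nonzero return (-EBUSY) means objects leaked from the region;
	 * the manager is intentionally left in place rather than risking
	 * a use-after-free on the leaked objects.
	 */
	if (intel_region_ttm_fini(mem)) {
		drm_warn(&mem->i915->drm, "leaking memory region\n");
		return;
	}

	kfree(mem); /* hypothetical free path */
}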
