From 3eb7d96e94150304011d214750b45766cf62d9c9 Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Sat, 17 Apr 2021 18:48:36 +0200
Subject: drm/ttm: flip over the range manager to self allocated nodes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Start with the range manager to make the resource object the base
class for the allocated nodes.

While at it, clean up a lot of the code around that.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-2-christian.koenig@amd.com
---
 drivers/gpu/drm/ttm/ttm_range_manager.c | 56 ++++++++++++++++++++++++---------
 1 file changed, 41 insertions(+), 15 deletions(-)

(limited to 'drivers/gpu/drm/ttm/ttm_range_manager.c')

diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index b9d5da6e6a81..ce5d07ca384c 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,12 +29,13 @@
  * Authors: Thomas Hellstrom <thellstrom@vmware.com>
  */

-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_bo_api.h>
 #include <drm/drm_mm.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/module.h>

 /*
  * Currently we use a spinlock for the lock, but a mutex *may* be
@@ -60,8 +61,8 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_resource *mem)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
+	struct ttm_range_mgr_node *node;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
 	enum drm_mm_insert_mode mode;
 	unsigned long lpfn;
 	int ret;
@@ -70,7 +71,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	if (!lpfn)
 		lpfn = man->size;

-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;

@@ -78,17 +79,19 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;

+	ttm_resource_init(bo, place, &node->base);
+
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);

 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}

 	return ret;
@@ -98,15 +101,19 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
 			       struct ttm_resource *mem)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
+	struct ttm_range_mgr_node *node;

-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	if (!mem->mm_node)
+		return;

-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	node = to_ttm_range_mgr_node(mem);
+
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
+
+	kfree(node);
+	mem->mm_node = NULL;
 }

 static void ttm_range_man_debug(struct ttm_resource_manager *man,
@@ -125,6 +132,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
 	.debug = ttm_range_man_debug
 };

+/**
+ * ttm_range_man_init
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * Initialise a generic range manager for the selected memory type.
+ * The range manager is installed for this device in the type slot.
+ */
 int ttm_range_man_init(struct ttm_device *bdev,
 		       unsigned type, bool use_tt,
 		       unsigned long p_size)
@@ -152,6 +170,14 @@ int ttm_range_man_init(struct ttm_device *bdev,
 }
 EXPORT_SYMBOL(ttm_range_man_init);

+/**
+ * ttm_range_man_fini
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Remove the generic range manager from a slot and tear it down.
+ */
 int ttm_range_man_fini(struct ttm_device *bdev,
 		       unsigned type)
 {
--
cgit
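For context: this cgit view is filtered to ttm_range_manager.c, but the same
commit also introduces the ttm_range_mgr_node type that the hunks above
allocate with struct_size() and recover with to_ttm_range_mgr_node(). Below
is a minimal sketch of those definitions, closely following the
include/drm/ttm/ttm_range_manager.h header added by this series; the exact
include lines are assumptions for a self-contained sketch, so check the
header in the tree for the authoritative version:

	#include <drm/drm_mm.h>
	#include <drm/ttm/ttm_resource.h>
	#include <linux/kernel.h>	/* container_of() */

	/*
	 * A range-manager node: the ttm_resource base class comes first,
	 * followed by a flexible array of drm_mm allocator nodes, so a
	 * single kzalloc(struct_size(node, mm_nodes, 1), ...) covers both.
	 */
	struct ttm_range_mgr_node {
		struct ttm_resource base;
		struct drm_mm_node mm_nodes[];
	};

	/* Downcast from the ttm_resource base class to the full node. */
	static inline struct ttm_range_mgr_node *
	to_ttm_range_mgr_node(struct ttm_resource *res)
	{
		return container_of(res, struct ttm_range_mgr_node, base);
	}

Because base is the first member, the pointer returned by kzalloc() and
&node->base alias, which is what lets ttm_range_man_free() recover the whole
node from the ttm_resource it is handed and release it with a single kfree().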