Diffstat (limited to 'drivers/gpu/drm/i915/i915_scatterlist.c'):

 drivers/gpu/drm/i915/i915_scatterlist.c | 63 +++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 43 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 41f2adb6a583..4d830740946d 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -5,10 +5,9 @@
  */
 
 #include "i915_scatterlist.h"
-
-#include "i915_buddy.h"
 #include "i915_ttm_buddy_manager.h"
 
+#include <drm/drm_buddy.h>
 #include <drm/drm_mm.h>
 
 #include <linux/slab.h>
@@ -57,7 +56,7 @@ static const struct i915_refct_sgt_ops rsgt_ops = {
 /**
  * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
  * @rsgt: The struct i915_refct_sgt to initialize.
- * size: The size of the underlying memory buffer.
+ * @size: The size of the underlying memory buffer.
  */
 void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
 {
@@ -69,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * drm_mm_node
  * @node: The drm_mm_node.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from a struct drm_mm_node,
  * taking a maximum segment length into account, splitting into segments
@@ -78,23 +78,33 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
-					      u64 region_start)
+					      u64 region_start,
+					      u32 page_alignment)
 {
-	const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
-	u64 segment_pages = max_segment >> PAGE_SHIFT;
+	const u32 max_segment = round_down(UINT_MAX, page_alignment);
+	const u32 segment_pages = max_segment >> PAGE_SHIFT;
 	u64 block_size, offset, prev_end;
 	struct i915_refct_sgt *rsgt;
 	struct sg_table *st;
 	struct scatterlist *sg;
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	GEM_BUG_ON(!max_segment);
+
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
 
 	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
 	st = &rsgt->table;
-	if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
-			   GFP_KERNEL)) {
+	/* restricted by sg_alloc_table */
+	if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
+				   unsigned int))) {
+		i915_refct_sgt_put(rsgt);
+		return ERR_PTR(-E2BIG);
+	}
+
+	if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
+			   GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -113,12 +123,14 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 				sg = __sg_next(sg);
 
 			sg_dma_address(sg) = region_start + offset;
+			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+					       page_alignment));
 			sg_dma_len(sg) = 0;
 			sg->length = 0;
 			st->nents++;
 		}
 
-		len = min(block_size, max_segment - sg->length);
+		len = min_t(u64, block_size, max_segment - sg->length);
 		sg->length += len;
 		sg_dma_len(sg) += len;
 
@@ -139,6 +151,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * i915_buddy_block list
  * @res: The struct i915_ttm_buddy_resource.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from struct i915_buddy_block list,
  * taking a maximum segment length into account, splitting into segments
@@ -148,28 +161,36 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
-						     u64 region_start)
+						     u64 region_start,
+						     u32 page_alignment)
 {
 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-	const u64 size = res->num_pages << PAGE_SHIFT;
-	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-	struct i915_buddy_mm *mm = bman_res->mm;
+	const u64 size = res->size;
+	const u32 max_segment = round_down(UINT_MAX, page_alignment);
+	struct drm_buddy *mm = bman_res->mm;
 	struct list_head *blocks = &bman_res->blocks;
-	struct i915_buddy_block *block;
+	struct drm_buddy_block *block;
 	struct i915_refct_sgt *rsgt;
 	struct scatterlist *sg;
 	struct sg_table *st;
 	resource_size_t prev_end;
 
 	GEM_BUG_ON(list_empty(blocks));
+	GEM_BUG_ON(!max_segment);
 
-	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
+	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
 
 	i915_refct_sgt_init(rsgt, size);
 	st = &rsgt->table;
-	if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+	/* restricted by sg_alloc_table */
+	if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
+		i915_refct_sgt_put(rsgt);
+		return ERR_PTR(-E2BIG);
+	}
+
+	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -181,8 +202,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 	list_for_each_entry(block, blocks, link) {
 		u64 block_size, offset;
 
-		block_size = min_t(u64, size, i915_buddy_block_size(mm, block));
-		offset = i915_buddy_block_offset(block);
+		block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
+		offset = drm_buddy_block_offset(block);
 
 		while (block_size) {
 			u64 len;
@@ -192,12 +213,14 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 				sg = __sg_next(sg);
 
 			sg_dma_address(sg) = region_start + offset;
+			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+					       page_alignment));
 			sg_dma_len(sg) = 0;
 			sg->length = 0;
 			st->nents++;
 		}
 
-		len = min(block_size, max_segment - sg->length);
+		len = min_t(u64, block_size, max_segment - sg->length);
 		sg->length += len;
 		sg_dma_len(sg) += len;
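Both conversion paths above share the same segmenting policy, which is easier to see outside the driver. The following is a minimal, standalone userspace sketch (hypothetical helper names, not i915 code) of how a contiguous block is split: each scatterlist entry is capped at UINT_MAX rounded down to page_alignment, so every full segment keeps the required alignment and a large block becomes a handful of maximal segments rather than one entry per page. Coalescing with a partially filled previous entry, which the kernel loop also handles via max_segment - sg->length, is omitted for brevity.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the kernel's round_down() for power-of-two alignments. */
	static uint32_t round_down_pow2(uint32_t x, uint32_t align)
	{
		return x & ~(align - 1);
	}

	/* Split one contiguous block into alignment-preserving segments. */
	static void split_block(uint64_t block_size, uint32_t page_alignment)
	{
		const uint32_t max_segment = round_down_pow2(UINT32_MAX, page_alignment);
		uint64_t offset = 0;

		while (block_size) {
			/* Cap each segment at max_segment, as the patched loops do. */
			uint64_t len = block_size < max_segment ? block_size : max_segment;

			printf("segment: offset=0x%llx len=0x%llx\n",
			       (unsigned long long)offset, (unsigned long long)len);
			offset += len;
			block_size -= len;
		}
	}

	int main(void)
	{
		/* A 5 GiB block with 64 KiB alignment yields two segments. */
		split_block(5ULL << 30, 64 * 1024);
		return 0;
	}

Taking the alignment as a parameter, rather than hardcoding PAGE_SIZE as i915_rsgt_from_buddy_resource() previously did, lets callers with larger minimum page sizes guarantee that no sg entry ends on a misaligned boundary; the added GEM_BUG_ON(!IS_ALIGNED(...)) checks assert exactly that for each new entry's dma address.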

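The new -E2BIG paths exist because sg_alloc_table() takes its entry count as an unsigned int while node->size and res->size are 64-bit, so the worst-case entry count must be range-checked before allocation. A userspace approximation of what the overflows_type() guards above are checking (illustrative names, not kernel API):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Sizes whose worst-case segment count exceeds UINT32_MAX must be
	 * rejected up front instead of being silently truncated when passed
	 * to sg_alloc_table()'s unsigned int nents parameter.
	 */
	static int nents_overflows(uint64_t pages, uint32_t segment_pages)
	{
		uint64_t nents = (pages + segment_pages - 1) / segment_pages;

		return nents > UINT32_MAX;
	}

	int main(void)
	{
		/* segment_pages as the patch derives it for 4 KiB pages. */
		const uint32_t segment_pages = 0xfffff000u >> 12;

		printf("%d\n", nents_overflows((1ULL << 40) >> 12, segment_pages)); /* 0 */
		printf("%d\n", nents_overflows(UINT64_MAX >> 12, segment_pages));   /* 1 */
		return 0;
	}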