diff options
Diffstat (limited to 'drivers/dma-buf/heaps')
 drivers/dma-buf/heaps/Kconfig       | 10
 drivers/dma-buf/heaps/cma_heap.c    | 36
 drivers/dma-buf/heaps/system_heap.c | 43
3 files changed, 56 insertions(+), 33 deletions(-)
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c4226..bb369b38b001 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -12,3 +12,13 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. + +config DMABUF_HEAPS_CMA_LEGACY + bool "Legacy DMA-BUF CMA Heap" + default y + depends on DMABUF_HEAPS_CMA + help + Add a duplicate CMA-backed dma-buf heap with legacy naming derived + from the CMA area's devicetree node, or "reserved" if the area is not + defined in the devicetree. This uses the same underlying allocator as + CONFIG_DMABUF_HEAPS_CMA. diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 9512d050563a..0df007111975 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -9,6 +9,9 @@ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. 
Davis <afd@ti.com> */ + +#define pr_fmt(fmt) "cma_heap: " fmt + #include <linux/cma.h> #include <linux/dma-buf.h> #include <linux/dma-heap.h> @@ -22,6 +25,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> +#define DEFAULT_CMA_NAME "default_cma_region" struct cma_heap { struct dma_heap *heap; @@ -366,17 +370,17 @@ static const struct dma_heap_ops cma_heap_ops = { .allocate = cma_heap_allocate, }; -static int __init __add_cma_heap(struct cma *cma, void *data) +static int __init __add_cma_heap(struct cma *cma, const char *name) { - struct cma_heap *cma_heap; struct dma_heap_export_info exp_info; + struct cma_heap *cma_heap; cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); if (!cma_heap) return -ENOMEM; cma_heap->cma = cma; - exp_info.name = cma_get_name(cma); + exp_info.name = name; exp_info.ops = &cma_heap_ops; exp_info.priv = cma_heap; @@ -394,12 +398,30 @@ static int __init __add_cma_heap(struct cma *cma, void *data) static int __init add_default_cma_heap(void) { struct cma *default_cma = dev_get_cma_area(NULL); - int ret = 0; + const char *legacy_cma_name; + int ret; - if (default_cma) - ret = __add_cma_heap(default_cma, NULL); + if (!default_cma) + return 0; - return ret; + ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) { + legacy_cma_name = cma_get_name(default_cma); + if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) { + pr_warn("legacy name and default name are the same, skipping legacy heap\n"); + return 0; + } + + ret = __add_cma_heap(default_cma, legacy_cma_name); + if (ret) + pr_warn("failed to add legacy heap: %pe\n", + ERR_PTR(ret)); + } + + return 0; } module_init(add_default_cma_heap); MODULE_DESCRIPTION("DMA-BUF CMA Heap"); diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 82b1b714300d..bbe7881f1360 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -33,7 +33,7 @@ struct 
system_heap_buffer { struct dma_heap_attachment { struct device *dev; - struct sg_table *table; + struct sg_table table; struct list_head list; bool mapped; }; @@ -52,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP}; static const unsigned int orders[] = {8, 4, 0}; #define NUM_ORDERS ARRAY_SIZE(orders) -static struct sg_table *dup_sg_table(struct sg_table *table) +static int dup_sg_table(struct sg_table *from, struct sg_table *to) { - struct sg_table *new_table; - int ret, i; struct scatterlist *sg, *new_sg; + int ret, i; - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); - if (!new_table) - return ERR_PTR(-ENOMEM); - - ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); - if (ret) { - kfree(new_table); - return ERR_PTR(-ENOMEM); - } + ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL); + if (ret) + return ret; - new_sg = new_table->sgl; - for_each_sgtable_sg(table, sg, i) { + new_sg = to->sgl; + for_each_sgtable_sg(from, sg, i) { sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); new_sg = sg_next(new_sg); } - return new_table; + return 0; } static int system_heap_attach(struct dma_buf *dmabuf, @@ -82,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf, { struct system_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; - struct sg_table *table; + int ret; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; - table = dup_sg_table(&buffer->sg_table); - if (IS_ERR(table)) { + ret = dup_sg_table(&buffer->sg_table, &a->table); + if (ret) { kfree(a); - return -ENOMEM; + return ret; } - a->table = table; a->dev = attachment->dev; INIT_LIST_HEAD(&a->list); a->mapped = false; @@ -118,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf, list_del(&a->list); mutex_unlock(&buffer->lock); - sg_free_table(a->table); - kfree(a->table); + sg_free_table(&a->table); kfree(a); } @@ -127,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct 
dma_buf_attachment *attac enum dma_data_direction direction) { struct dma_heap_attachment *a = attachment->priv; - struct sg_table *table = a->table; + struct sg_table *table = &a->table; int ret; ret = dma_map_sgtable(attachment->dev, table, direction, 0); @@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; - dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); } mutex_unlock(&buffer->lock); @@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; - dma_sync_sgtable_for_device(a->dev, a->table, direction); + dma_sync_sgtable_for_device(a->dev, &a->table, direction); } mutex_unlock(&buffer->lock); |