Diffstat (limited to 'drivers/dma-buf/heaps/cma_heap.c')
 drivers/dma-buf/heaps/cma_heap.c | 75
 1 file changed, 53 insertions(+), 22 deletions(-)
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index a7f048048864..42f88193eab9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -9,20 +9,39 @@
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
  *	Andrew F. Davis <afd@ti.com>
  */
+
+#define pr_fmt(fmt) "cma_heap: " fmt
+
 #include <linux/cma.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-buf/heaps/cma.h>
 #include <linux/dma-heap.h>
 #include <linux/dma-map-ops.h>
-#include <linux/dma-resv.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+#define DEFAULT_CMA_NAME "default_cma_region"
+
+static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned int dma_areas_num __initdata;
+
+int __init dma_heap_cma_register_heap(struct cma *cma)
+{
+	if (dma_areas_num >= ARRAY_SIZE(dma_areas))
+		return -EINVAL;
+
+	dma_areas[dma_areas_num++] = cma;
+
+	return 0;
+}
+
 struct cma_heap {
 	struct dma_heap *heap;
@@ -166,13 +185,10 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct cma_heap_buffer *buffer = vma->vm_private_data;
 
-	if (vmf->pgoff > buffer->pagecount)
+	if (vmf->pgoff >= buffer->pagecount)
 		return VM_FAULT_SIGBUS;
 
-	vmf->page = buffer->pages[vmf->pgoff];
-	get_page(vmf->page);
-
-	return 0;
+	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
 }
 
 static const struct vm_operations_struct dma_heap_vm_ops = {
@@ -183,11 +199,11 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 
-	dma_resv_assert_held(dmabuf->resv);
-
 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
 		return -EINVAL;
 
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
 	vma->vm_ops = &dma_heap_vm_ops;
 	vma->vm_private_data = buffer;
 
@@ -278,8 +294,8 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
 
 static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 					 unsigned long len,
-					 unsigned long fd_flags,
-					 unsigned long heap_flags)
+					 u32 fd_flags,
+					 u64 heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
 	struct cma_heap_buffer *buffer;
@@ -313,13 +329,13 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 		struct page *page = cma_pages;
 
 		while (nr_clear_pages > 0) {
-			void *vaddr = kmap_atomic(page);
+			void *vaddr = kmap_local_page(page);
 
 			memset(vaddr, 0, PAGE_SIZE);
-			kunmap_atomic(vaddr);
+			kunmap_local(vaddr);
 			/*
 			 * Avoid wasting time zeroing memory if the process
-			 * has been killed by by SIGKILL
+			 * has been killed by SIGKILL.
 			 */
 			if (fatal_signal_pending(current))
 				goto free_cma;
@@ -370,17 +386,17 @@ static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
 };
 
-static int __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, const char *name)
 {
-	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
+	struct cma_heap *cma_heap;
 
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
 		return -ENOMEM;
 	cma_heap->cma = cma;
 
-	exp_info.name = cma_get_name(cma);
+	exp_info.name = name;
 	exp_info.ops = &cma_heap_ops;
 	exp_info.priv = cma_heap;
 
@@ -395,15 +411,30 @@ static int __add_cma_heap(struct cma *cma, void *data)
 	return 0;
 }
 
-static int add_default_cma_heap(void)
+static int __init add_cma_heaps(void)
 {
 	struct cma *default_cma = dev_get_cma_area(NULL);
-	int ret = 0;
+	unsigned int i;
+	int ret;
 
-	if (default_cma)
-		ret = __add_cma_heap(default_cma, NULL);
+	if (default_cma) {
+		ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	for (i = 0; i < dma_areas_num; i++) {
+		struct cma *cma = dma_areas[i];
+
+		ret = __add_cma_heap(cma, cma_get_name(cma));
+		if (ret) {
+			pr_warn("Failed to add CMA heap %s", cma_get_name(cma));
+			continue;
+		}
+
+	}
+
+	return 0;
 }
-module_init(add_default_cma_heap);
+module_init(add_cma_heaps);
 MODULE_DESCRIPTION("DMA-BUF CMA Heap");
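Note on the new API: dma_heap_cma_register_heap() only queues a CMA area in dma_areas[] (both __initdata); add_cma_heaps() then walks that list once at module_init() time and exports each area as a dma-buf heap named after it, alongside the default area. The diff itself contains no caller, so the snippet below is only a sketch of how a reserved-memory setup hook could feed a region into it, assuming the new <linux/dma-buf/heaps/cma.h> header (added elsewhere in this series) declares the function. The compatible string "vendor,custom-cma-heap", the custom_cma_heap_setup() name, and the order_per_bit value of 0 are illustrative assumptions, not part of this patch.

/*
 * Hypothetical caller, not part of this patch: a reserved-memory setup
 * hook that turns a devicetree reserved region into a CMA area and
 * queues it for export as a CMA heap. Registration must happen before
 * add_cma_heaps() runs, which early rmem setup hooks satisfy.
 */
#include <linux/cma.h>
#include <linux/dma-buf/heaps/cma.h>	/* declares dma_heap_cma_register_heap() */
#include <linux/of_reserved_mem.h>

static int __init custom_cma_heap_setup(struct reserved_mem *rmem)
{
	struct cma *cma;
	int ret;

	/* Carve the reserved region out as a CMA area (order_per_bit = 0). */
	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (ret)
		return ret;

	/* Queue the area; it becomes a heap named after the region. */
	return dma_heap_cma_register_heap(cma);
}
RESERVEDMEM_OF_DECLARE(custom_cma_heap, "vendor,custom-cma-heap",
		       custom_cma_heap_setup);

With something like the above in place, the heap shows up under /dev/dma_heap/ with the reserved-memory node's name, while the system-wide default CMA area keeps the fixed "default_cma_region" name defined by this patch.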
