From 9d2ffe5c62554f2795859bb661eb138759eee980 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Mon, 25 Apr 2016 22:08:52 +0300
Subject: xtensa: enable HAVE_DMA_CONTIGUOUS

Enable HAVE_DMA_CONTIGUOUS, reserve contiguous memory at bootmem_init,
use dma_alloc_from_contiguous and dma_release_from_contiguous in
xtensa_dma_alloc/free.

This allows for big contiguous DMA buffer allocation from a designated
area configured in the device tree.

Signed-off-by: Max Filippov
---
 arch/xtensa/kernel/pci-dma.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1e68806d6695..10e5b8acf944 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -15,6 +15,7 @@
  * Joe Taylor
  */
 
+#include <linux/dma-contiguous.h>
 #include
 #include
 #include
@@ -146,6 +147,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 {
 	unsigned long ret;
 	unsigned long uncached = 0;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
 
 	/* ignore region speicifiers */
 
@@ -153,11 +156,18 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		flag |= GFP_DMA;
 
-	ret = (unsigned long)__get_free_pages(flag, get_order(size));
-	if (ret == 0)
+	if (gfpflags_allow_blocking(flag))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+
+	if (!page)
+		page = alloc_pages(flag, get_order(size));
+
+	if (!page)
 		return NULL;
 
+	ret = (unsigned long)page_address(page);
+
 	/* We currently don't support coherent memory outside KSEG */
 
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
@@ -170,16 +180,19 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	return (void *)uncached;
 }
 
-static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
 	unsigned long addr = (unsigned long)vaddr +
 		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+	struct page *page = virt_to_page(addr);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
 	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
-	free_pages(addr, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, count))
+		__free_pages(page, get_order(size));
 }
 
 static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
--
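
For context, a minimal consumer-side sketch (not part of this patch; the
example_* names and the 8 MiB size are made up for illustration) of the kind
of allocation this enables: a driver asking for a large coherent buffer via
dma_alloc_coherent(), which on xtensa can now be satisfied from the reserved
CMA area whenever the GFP flags allow blocking, and otherwise still falls
back to the page allocator as before.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/sizes.h>

static void *example_buf;		/* hypothetical driver state */
static dma_addr_t example_buf_dma;

static int example_alloc_big_buffer(struct device *dev)
{
	/*
	 * With 4 KiB pages an 8 MiB request typically exceeds the largest
	 * block the buddy allocator can hand out in one piece, so before
	 * this change it would usually fail; with a CMA area reserved it
	 * can be satisfied by dma_alloc_from_contiguous().
	 */
	example_buf = dma_alloc_coherent(dev, SZ_8M, &example_buf_dma,
					 GFP_KERNEL);
	return example_buf ? 0 : -ENOMEM;
}

static void example_free_big_buffer(struct device *dev)
{
	dma_free_coherent(dev, SZ_8M, example_buf, example_buf_dma);
}

Note that GFP_ATOMIC requests bypass the CMA path, since
gfpflags_allow_blocking() is false for them; they go straight to
alloc_pages() exactly as the old code did.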