author:    Christoph Hellwig <hch@lst.de>	2019-05-20 09:29:42 +0200
committer: Joerg Roedel <jroedel@suse.de>	2019-05-27 17:31:11 +0200
commit:    ee1ef05d02b03118e4c197e1193329f5b64246b7
tree:      61e34e68b7e7ff52086b99d458a10b55680e3137 /drivers/iommu/dma-iommu.c
parent:    9ad5d6eddcb0fa7c227c0612011221e715e8ef49
iommu/dma: Refactor iommu_dma_alloc, part 2
All the logic in iommu_dma_alloc that deals with page allocation from the CMA or page allocators can be split into a self-contained helper, and we can then map the result of that or the atomic pool allocation with the IOMMU later. This also allows reusing __iommu_dma_free to tear down the allocations and MMU mappings when the IOMMU mapping fails.

Based on a patch from Robin Murphy.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
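[Editor's illustration] The shape the patch introduces can be modeled outside the kernel. Below is a minimal userspace C sketch of the pattern: allocation split into its own helper, mapping done afterwards, and a single free helper reused by both the normal free path and the mapping-failure unwind. All demo_* names are invented for this sketch; only the structure mirrors the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Models iommu_dma_alloc_pages(): allocation only, no mapping. */
static void *demo_alloc_pages(size_t size)
{
	return calloc(1, size);
}

/* Models __iommu_dma_free(): the one teardown path, usable both for a
 * normal free and for unwinding when the mapping step fails. */
static void demo_free(void *cpu_addr, size_t size)
{
	(void)size;
	free(cpu_addr);
}

/* Models __iommu_dma_map(); pretend it can fail even after the
 * allocation has already succeeded. */
static int demo_map(void *cpu_addr, size_t size)
{
	(void)cpu_addr;
	(void)size;
	return 0;	/* 0 = mapped, -1 = DMA_MAPPING_ERROR analogue */
}

/* Models the reworked iommu_dma_alloc(): allocate first, map second,
 * and reuse demo_free() on the error path instead of a bespoke unwind. */
static void *demo_alloc(size_t size)
{
	void *cpu_addr = demo_alloc_pages(size);

	if (!cpu_addr)
		return NULL;
	if (demo_map(cpu_addr, size) < 0) {
		demo_free(cpu_addr, size);
		return NULL;
	}
	return cpu_addr;
}

int main(void)
{
	void *buf = demo_alloc(4096);

	if (!buf)
		return 1;
	memset(buf, 0xaa, 4096);
	printf("allocated, mapped and freed 4096 bytes\n");
	demo_free(buf, 4096);
	return 0;
}

The benefit of the shape is visible in demo_alloc(): because nothing is mapped until the allocation has fully succeeded, the error path needs no separate out_unmap label and can simply call the common free helper.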
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	65
1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4e27a29f4458..84761adbb1d4 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -972,35 +972,14 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	__iommu_dma_free(dev, size, cpu_addr);
 }
 
-static void *iommu_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+		struct page **pagep, gfp_t gfp, unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
-	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t alloc_size = PAGE_ALIGN(size);
 	struct page *page = NULL;
 	void *cpu_addr;
 
-	gfp |= __GFP_ZERO;
-
-	if (gfpflags_allow_blocking(gfp) &&
-	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
-		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
-
-	if (!gfpflags_allow_blocking(gfp) && !coherent) {
-		cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
-		if (!cpu_addr)
-			return NULL;
-
-		*handle = __iommu_dma_map(dev, page_to_phys(page), size,
-				ioprot);
-		if (*handle == DMA_MAPPING_ERROR) {
-			dma_free_from_pool(cpu_addr, alloc_size);
-			return NULL;
-		}
-		return cpu_addr;
-	}
-
 	if (gfpflags_allow_blocking(gfp))
 		page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
 						 get_order(alloc_size),
@@ -1010,33 +989,59 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
-	if (*handle == DMA_MAPPING_ERROR)
-		goto out_free_pages;
-
 	if (!coherent || PageHighMem(page)) {
 		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
 		if (!cpu_addr)
-			goto out_unmap;
+			goto out_free_pages;
 
 		if (!coherent)
 			arch_dma_prep_coherent(page, size);
 	} else {
 		cpu_addr = page_address(page);
 	}
+
+	*pagep = page;
 	memset(cpu_addr, 0, alloc_size);
 	return cpu_addr;
-out_unmap:
-	__iommu_dma_unmap(dev, *handle, size);
 out_free_pages:
 	if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
 		__free_pages(page, get_order(alloc_size));
 	return NULL;
 }
 
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	bool coherent = dev_is_dma_coherent(dev);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	struct page *page = NULL;
+	void *cpu_addr;
+
+	gfp |= __GFP_ZERO;
+
+	if (gfpflags_allow_blocking(gfp) &&
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+	if (!gfpflags_allow_blocking(gfp) && !coherent)
+		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+	else
+		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+	if (!cpu_addr)
+		return NULL;
+
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+	if (*handle == DMA_MAPPING_ERROR) {
+		__iommu_dma_free(dev, size, cpu_addr);
+		return NULL;
+	}
+
+	return cpu_addr;
+}
+
 static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
 		unsigned long pfn, size_t size)
 {