Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	247
1 file changed, 89 insertions(+), 158 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b4a33358d2e9..a4c765d24692 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,7 +25,7 @@
 #include <linux/sizes.h>
 #include <linux/cma.h>
 
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -258,12 +258,14 @@ static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
 
 static int dma_mmu_remap_num __initdata;
 
+#ifdef CONFIG_DMA_CMA
 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
 {
 	dma_mmu_remap[dma_mmu_remap_num].base = base;
 	dma_mmu_remap[dma_mmu_remap_num].size = size;
 	dma_mmu_remap_num++;
 }
+#endif
 
 void __init dma_contiguous_remap(void)
 {
@@ -622,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	kfree(buf);
 }
 
-static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, enum dma_data_direction dir,
+static void dma_cache_maint_page(phys_addr_t phys, size_t size,
+	enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
-	unsigned long pfn;
+	unsigned long offset = offset_in_page(phys);
+	unsigned long pfn = __phys_to_pfn(phys);
 	size_t left = size;
 
-	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
@@ -642,17 +642,18 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 		size_t len = left;
 		void *vaddr;
 
-		page = pfn_to_page(pfn);
-
-		if (PageHighMem(page)) {
+		phys = __pfn_to_phys(pfn);
+		if (PhysHighMem(phys)) {
 			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
 
 			if (cache_is_vipt_nonaliasing()) {
-				vaddr = kmap_atomic(page);
+				vaddr = kmap_atomic_pfn(pfn);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
 			} else {
+				struct page *page = phys_to_page(phys);
+
 				vaddr = kmap_high_get(page);
 				if (vaddr) {
 					op(vaddr + offset, len, dir);
@@ -660,7 +661,8 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				}
 			}
 		} else {
-			vaddr = page_address(page) + offset;
+			phys += offset;
+			vaddr = phys_to_virt(phys);
 			op(vaddr, len, dir);
 		}
 		offset = 0;
@@ -674,14 +676,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
  * Note: Drivers should NOT use this function directly.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr;
+	dma_cache_maint_page(paddr, size, dir, dmac_map_area);
 
-	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
-
-	paddr = page_to_phys(page) + off;
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
@@ -690,36 +689,36 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+	enum dma_data_direction dir)
 {
-	phys_addr_t paddr = page_to_phys(page) + off;
-
 	/* FIXME: non-speculating: not required */
 
 	/* in any case, don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
-		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
 	}
 
 	/*
 	 * Mark the D-cache clean for these pages to avoid extra flushing.
 	 */
 	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
-		unsigned long pfn;
-		size_t left = size;
-
-		pfn = page_to_pfn(page) + off / PAGE_SIZE;
-		off %= PAGE_SIZE;
-		if (off) {
-			pfn++;
-			left -= PAGE_SIZE - off;
-		}
-		while (left >= PAGE_SIZE) {
-			page = pfn_to_page(pfn++);
-			set_bit(PG_dcache_clean, &page->flags);
-			left -= PAGE_SIZE;
+		struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
+		size_t offset = offset_in_folio(folio, paddr);
+
+		for (;;) {
+			size_t sz = folio_size(folio) - offset;
+
+			if (size < sz)
+				break;
+			if (!offset)
+				set_bit(PG_dcache_clean, &folio->flags.f);
+			offset = 0;
+			size -= sz;
+			if (!size)
+				break;
+			folio = folio_next(folio);
 		}
 	}
 }
@@ -733,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
 
+	if (attrs & DMA_ATTR_MMIO)
+		prot |= IOMMU_MMIO;
+
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 		return prot | IOMMU_READ | IOMMU_WRITE;
@@ -855,10 +857,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	int i = 0;
 	int order_idx = 0;
 
-	if (array_size <= PAGE_SIZE)
-		pages = kzalloc(array_size, GFP_KERNEL);
-	else
-		pages = vzalloc(array_size);
+	pages = kvzalloc(array_size, GFP_KERNEL);
 	if (!pages)
 		return NULL;
 
@@ -1204,7 +1203,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+			arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 
 		prot = __dma_info_to_prot(dir, attrs);
 
@@ -1306,8 +1305,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
 			__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
 
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
+			arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 	}
 }
 
@@ -1329,7 +1327,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 
 }
 
@@ -1351,29 +1349,31 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 }
 
 /**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
  * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
  * @size: size of buffer to map
 * @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
 *
 * IOMMU aware version of arm_dma_map_page()
 */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+	size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	int len = PAGE_ALIGN(size + offset_in_page(phys));
+	phys_addr_t addr = phys & PAGE_MASK;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+		arch_sync_dma_for_device(phys, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1381,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot, GFP_KERNEL);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + offset_in_page(phys);
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_MAPPING_ERROR;
@@ -1398,82 +1397,27 @@ fail:
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
 *
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
 */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
 	if (!iova)
 		return;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-		__dma_page_dev_to_cpu(page, offset, size, dir);
-	}
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t dma_addr;
-	int ret, prot;
-	phys_addr_t addr = phys_addr & PAGE_MASK;
-	unsigned int offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return dma_addr;
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
+		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
+
-	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_MAPPING_ERROR;
-}
-
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = dma_handle & PAGE_MASK;
-	unsigned int offset = dma_handle & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
+		arch_sync_dma_for_cpu(phys + offset, size, dir);
+	}
 
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
@@ -1484,14 +1428,14 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_cpu(phys + offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1499,14 +1443,14 @@
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;
 
 	if (dev->dma_coherent || !iova)
 		return;
 
-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_device(phys + offset, size, dir);
 }
 
 static const struct dma_map_ops iommu_ops = {
@@ -1515,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
 	.mmap = arm_iommu_mmap_attrs,
 	.get_sgtable = arm_iommu_get_sgtable,
 
-	.map_page = arm_iommu_map_page,
-	.unmap_page = arm_iommu_unmap_page,
+	.map_phys = arm_iommu_map_phys,
+	.unmap_phys = arm_iommu_unmap_phys,
 
 	.sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
 	.sync_single_for_device = arm_iommu_sync_single_for_device,
@@ -1524,14 +1468,11 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_sg = arm_iommu_unmap_sg,
 	.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device = arm_iommu_sync_sg_for_device,
-
-	.map_resource = arm_iommu_map_resource,
-	.unmap_resource = arm_iommu_unmap_resource,
 };
 
 /**
 * arm_iommu_create_mapping
- * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @dev: pointer to the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
@@ -1543,7 +1484,7 @@ static const struct dma_map_ops iommu_ops = {
 * arm_iommu_attach_device function.
 */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size)
 {
 	unsigned int bits = size >> PAGE_SHIFT;
 	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1584,9 +1525,11 @@ arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
 
 	spin_lock_init(&mapping->lock);
 
-	mapping->domain = iommu_domain_alloc(bus);
-	if (!mapping->domain)
+	mapping->domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(mapping->domain)) {
+		err = PTR_ERR(mapping->domain);
 		goto err4;
+	}
 
 	kref_init(&mapping->kref);
 	return mapping;
@@ -1708,12 +1651,16 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu, bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
+	u64 dma_base = 0, size = 1ULL << 32;
 
-	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+	if (dev->dma_range_map) {
+		dma_base = dma_range_map_min(dev->dma_range_map);
+		size = dma_range_map_max(dev->dma_range_map) - dma_base;
+	}
+	mapping = arm_iommu_create_mapping(dev, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
 				size, dev_name(dev));
@@ -1743,8 +1690,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 
 #else
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu, bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 }
 
@@ -1752,8 +1698,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif	/* CONFIG_ARM_DMA_USE_IOMMU */
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1772,8 +1717,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (dev->dma_ops)
 		return;
 
-	if (iommu)
-		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
+	if (device_iommu_mapped(dev))
+		arm_setup_iommu_dma_ops(dev);
 
 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
@@ -1789,20 +1734,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			size, dir);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
 {
