Diffstat (limited to 'arch/sparc/kernel/pci_sun4v.c')
-rw-r--r--	arch/sparc/kernel/pci_sun4v.c	86
1 file changed, 44 insertions(+), 42 deletions(-)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index fa0e42b4cbfb..791f0a76665f 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,7 +15,9 @@
 #include <linux/msi.h>
 #include <linux/export.h>
 #include <linux/log2.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-map-ops.h>
 
 #include <asm/iommu-common.h>
 #include <asm/iommu.h>
@@ -73,6 +75,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
 	p->npages	= 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+	return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled.  */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +99,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 	prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		if (mask <= DMA_BIT_MASK(32)) {
+		if (!iommu_use_atu(pbm->iommu, mask)) {
 			num = pci_sun4v_iommu_map(devhandle,
 						  HV_PCI_TSBID(0, entry),
 						  npages,
@@ -179,7 +186,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
-	struct atu *atu;
 	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
@@ -188,7 +194,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
 	size = IO_PAGE_ALIGN(size);
 	order = get_order(size);
-	if (unlikely(order >= MAX_ORDER))
+	if (unlikely(order > MAX_PAGE_ORDER))
 		return NULL;
 
 	npages = size >> IO_PAGE_SHIFT;
@@ -205,13 +211,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
-	atu = iommu->atu;
-
 	mask = dev->coherent_dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
-		tbl = &atu->tbl;
+		tbl = &iommu->atu->tbl;
 
 	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
@@ -252,9 +256,9 @@ range_alloc_fail:
 	return NULL;
 }
 
-unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
-				unsigned long iotsb_num,
-				struct pci_bus *bus_dev)
+static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+				       unsigned long iotsb_num,
+				       struct pci_bus *bus_dev)
 {
 	struct pci_dev *pdev;
 	unsigned long err;
@@ -333,7 +337,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
-	if (dvma <= DMA_BIT_MASK(32)) {
+	if (!iommu_use_atu(iommu, dvma)) {
 		tbl = &iommu->tbl;
 		iotsb_num = 0; /* we don't care for legacy iommu */
 	} else {
@@ -348,9 +352,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	free_pages((unsigned long)cpu, order);
 }
 
-static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
-				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction,
+static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
+				  size_t sz, enum dma_data_direction direction,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
@@ -358,23 +361,32 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	struct iommu_map_table *tbl;
 	u64 mask;
 	unsigned long flags, npages, oaddr;
-	unsigned long i, base_paddr;
-	unsigned long prot;
+	unsigned long i, prot;
 	dma_addr_t bus_addr, ret;
 	long entry;
 
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		/*
+		 * This check is included because older versions of the code
+		 * lacked MMIO path support, and my ability to test this path
+		 * is limited. However, from a software technical standpoint,
+		 * there is no restriction, as the following code operates
+		 * solely on physical addresses.
+		 */
+		goto bad;
+
 	iommu = dev->archdata.iommu;
 	atu = iommu->atu;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	oaddr = (unsigned long)(page_address(page) + offset);
+	oaddr = (unsigned long)(phys_to_virt(phys));
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;
@@ -387,7 +399,6 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 
 	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
-	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
 	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
@@ -399,8 +410,8 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 
 	iommu_batch_start(dev, prot, entry);
 
-	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = iommu_batch_add(base_paddr, mask);
+	for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
+		long err = iommu_batch_add(phys, mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
@@ -422,7 +433,7 @@ iommu_map_fail:
 	return DMA_MAPPING_ERROR;
 }
 
-static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
 			      size_t sz, enum dma_data_direction direction,
 			      unsigned long attrs)
 {
@@ -483,7 +494,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	iommu = dev->archdata.iommu;
 	if (nelems == 0 || !iommu)
-		return 0;
+		return -EINVAL;
 	atu = iommu->atu;
 
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -506,11 +517,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
-	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;
@@ -592,7 +602,6 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -609,7 +618,6 @@ iommu_map_failed:
 			iommu_tbl_range_free(tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -617,7 +625,7 @@ iommu_map_failed:
 	}
 	local_irq_restore(flags);
 
-	return 0;
+	return -EINVAL;
 }
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -674,25 +682,19 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static int dma_4v_supported(struct device *dev, u64 device_mask)
 {
 	struct iommu *iommu = dev->archdata.iommu;
-	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask > DMA_BIT_MASK(32)) {
-		if (iommu->atu)
-			dma_addr_mask = iommu->atu->dma_addr_mask;
-		else
-			return 0;
-	}
-
-	if ((device_mask & dma_addr_mask) == dma_addr_mask)
+	if (ali_sound_dma_hack(dev, device_mask))
 		return 1;
-	return pci64_dma_supported(to_pci_dev(dev), device_mask);
+	if (device_mask < iommu->dma_addr_mask)
+		return 0;
+	return 1;
 }
 
 static const struct dma_map_ops sun4v_dma_ops = {
 	.alloc				= dma_4v_alloc_coherent,
 	.free				= dma_4v_free_coherent,
-	.map_page			= dma_4v_map_page,
-	.unmap_page			= dma_4v_unmap_page,
+	.map_phys			= dma_4v_map_phys,
+	.unmap_phys			= dma_4v_unmap_phys,
 	.map_sg				= dma_4v_map_sg,
 	.unmap_sg			= dma_4v_unmap_sg,
 	.dma_supported			= dma_4v_supported,
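Note on the recurring change above: the scattered "mask <= DMA_BIT_MASK(32)" tests are folded into the new iommu_use_atu() helper, which selects between the legacy IOMMU table and the ATU (the sun4v unit that handles DMA above 32 bits) and also covers the case where a device advertises a 64-bit mask on a system with no ATU. The following is a minimal userspace sketch of that selection logic only; DMA_BIT_MASK and the struct layouts here are simplified stand-ins for the kernel's definitions, not the actual code from arch/sparc/kernel/pci_sun4v.c.

	/*
	 * Userspace model of the iommu_use_atu() decision: use the ATU
	 * only when it exists AND the DMA mask needs more than 32 bits.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel macro of the same name. */
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	struct atu   { int dummy; };           /* stand-in for the kernel's struct atu */
	struct iommu { struct atu *atu; };     /* stand-in; NULL atu == no ATU present */

	static bool iommu_use_atu(const struct iommu *iommu, uint64_t mask)
	{
		return iommu->atu && mask > DMA_BIT_MASK(32);
	}

	int main(void)
	{
		struct atu atu = { 0 };
		struct iommu with_atu = { &atu };
		struct iommu without_atu = { NULL };

		/* 64-bit mask, ATU present: translate through the ATU table. */
		printf("%d\n", iommu_use_atu(&with_atu, DMA_BIT_MASK(64)));    /* 1 */
		/* 32-bit mask: legacy IOMMU table even though an ATU exists. */
		printf("%d\n", iommu_use_atu(&with_atu, DMA_BIT_MASK(32)));    /* 0 */
		/* 64-bit mask but no ATU: fall back to the legacy table. */
		printf("%d\n", iommu_use_atu(&without_atu, DMA_BIT_MASK(64))); /* 0 */
		return 0;
	}

Centralizing the test means every path in the diff (batch flush, coherent alloc/free, map_phys, map_sg) now makes the table choice the same way, instead of each call site open-coding the mask comparison.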
