Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--   arch/alpha/kernel/pci_iommu.c   133
1 file changed, 55 insertions(+), 78 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index aa0f50d0f823..955b6ca61627 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -11,8 +11,9 @@
 #include <linux/export.h>
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/iommu-helper.h>
+#include <linux/string_choices.h>
 
 #include <asm/io.h>
 #include <asm/hwrpb.h>
@@ -71,31 +72,8 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 	   particular systems can over-align the arena.  */
 	if (align < mem_size)
 		align = mem_size;
-
-#ifdef CONFIG_DISCONTIGMEM
-
-	arena = memblock_alloc_node(sizeof(*arena), align, nid);
-	if (!NODE_DATA(nid) || !arena) {
-		printk("%s: couldn't allocate arena from node %d\n"
-		       "    falling back to system-wide allocation\n",
-		       __func__, nid);
-		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
-	}
-
-	arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
-	if (!NODE_DATA(nid) || !arena->ptes) {
-		printk("%s: couldn't allocate arena ptes from node %d\n"
-		       "    falling back to system-wide allocation\n",
-		       __func__, nid);
-		arena->ptes = memblock_alloc_from(mem_size, align, 0);
-	}
-
-#else /* CONFIG_DISCONTIGMEM */
-
-	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
-	arena->ptes = memblock_alloc_from(mem_size, align, 0);
-
-#endif /* CONFIG_DISCONTIGMEM */
+	arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES);
+	arena->ptes = memblock_alloc_or_panic(mem_size, align);
 
 	spin_lock_init(&arena->lock);
 	arena->hose = hose;
@@ -129,12 +107,7 @@ iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
 	unsigned long boundary_size;
 
 	base = arena->dma_base >> PAGE_SHIFT;
-	if (dev) {
-		boundary_size = dma_get_seg_boundary(dev) + 1;
-		boundary_size >>= PAGE_SHIFT;
-	} else {
-		boundary_size = 1UL << (32 - PAGE_SHIFT);
-	}
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
@@ -149,10 +122,12 @@ again:
 			goto again;
 		}
 
-		if (ptes[p+i])
-			p = ALIGN(p + i + 1, mask + 1), i = 0;
-		else
+		if (ptes[p+i]) {
+			p = ALIGN(p + i + 1, mask + 1);
+			i = 0;
+		} else {
 			i = i + 1;
+		}
 	}
 
 	if (i < n) {
@@ -237,8 +212,8 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
 		ok = 0;
 
 	/* If both conditions above are met, we are fine. */
-	DBGA("pci_dac_dma_supported %s from %pf\n",
-	     ok ? "yes" : "no", __builtin_return_address(0));
+	DBGA("pci_dac_dma_supported %s from %ps\n",
+	     str_yes_no(ok), __builtin_return_address(0));
 
 	return ok;
 }
@@ -249,28 +224,26 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
    until either pci_unmap_single or pci_dma_sync_single is performed.  */
 
 static dma_addr_t
-pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
+pci_map_single_1(struct pci_dev *pdev, phys_addr_t paddr, size_t size,
 		 int dac_allowed)
 {
 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
 	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+	unsigned long offset = offset_in_page(paddr);
 	struct pci_iommu_arena *arena;
 	long npages, dma_ofs, i;
-	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
 	struct device *dev = pdev ? &pdev->dev : NULL;
 
-	paddr = __pa(cpu_addr);
-
 #if !DEBUG_NODIRECT
 	/* First check to see if we can use the direct map window.  */
 	if (paddr + size + __direct_map_base - 1 <= max_dma
 	    && paddr + size <= __direct_map_size) {
 		ret = paddr + __direct_map_base;
 
-		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
-		      cpu_addr, size, ret, __builtin_return_address(0));
+		DBGA2("pci_map_single: [%pa,%zx] -> direct %llx from %ps\n",
+		      &paddr, size, ret, __builtin_return_address(0));
 
 		return ret;
 	}
@@ -280,8 +253,8 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	if (dac_allowed) {
 		ret = paddr + alpha_mv.pci_dac_offset;
 
-		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
-		      cpu_addr, size, ret, __builtin_return_address(0));
+		DBGA2("pci_map_single: [%pa,%zx] -> DAC %llx from %ps\n",
+		      &paddr, size, ret, __builtin_return_address(0));
 
 		return ret;
 	}
@@ -315,10 +288,10 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
 
 	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
-	ret += (unsigned long)cpu_addr & ~PAGE_MASK;
+	ret += offset;
 
-	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
-	      cpu_addr, size, npages, ret, __builtin_return_address(0));
+	DBGA2("pci_map_single: [%pa,%zx] np %ld -> sg %llx from %ps\n",
+	      &paddr, size, npages, ret, __builtin_return_address(0));
 
 	return ret;
 }
@@ -347,19 +320,18 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
 	return NULL;
 }
 
-static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir,
+static dma_addr_t alpha_pci_map_phys(struct device *dev, phys_addr_t phys,
+				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	int dac_allowed;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
 
-	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
-	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
-				size, dac_allowed);
+	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+	return pci_map_single_1(pdev, phys, size, dac_allowed);
 }
 
 /* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
@@ -368,7 +340,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
    the cpu to the buffer are guaranteed to see whatever the device
    wrote there.  */
 
-static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void alpha_pci_unmap_phys(struct device *dev, dma_addr_t dma_addr,
 				 size_t size, enum dma_data_direction dir,
 				 unsigned long attrs)
 {
@@ -378,20 +350,18 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	struct pci_iommu_arena *arena;
 	long dma_ofs, npages;
 
-	BUG_ON(dir == PCI_DMA_NONE);
-
 	if (dma_addr >= __direct_map_base
 	    && dma_addr < __direct_map_base + __direct_map_size) {
 		/* Nothing to do.  */
 
-		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
+		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
 		      dma_addr, size, __builtin_return_address(0));
 
 		return;
 	}
 
 	if (dma_addr > 0xffffffff) {
-		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
+		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
 		      dma_addr, size, __builtin_return_address(0));
 		return;
 	}
@@ -423,7 +393,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 
 	spin_unlock_irqrestore(&arena->lock, flags);
 
-	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
+	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
 	      dma_addr, size, npages, __builtin_return_address(0));
 }
 
@@ -446,7 +416,7 @@ try_again:
 	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
 	if (! cpu_addr) {
 		printk(KERN_INFO "pci_alloc_consistent: "
-		       "get_free_pages failed from %pf\n",
+		       "get_free_pages failed from %ps\n",
 			__builtin_return_address(0));
 		/* ??? Really atomic allocation?  Otherwise we could play
 		   with vmalloc and sg if we can't find contiguous memory.  */
@@ -454,7 +424,7 @@ try_again:
 	}
 	memset(cpu_addr, 0, size);
 
-	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
+	*dma_addrp = pci_map_single_1(pdev, virt_to_phys(cpu_addr), size, 0);
 	if (*dma_addrp == DMA_MAPPING_ERROR) {
 		free_pages((unsigned long)cpu_addr, order);
 		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
@@ -465,7 +435,7 @@ try_again:
 		goto try_again;
 	}
 
-	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
+	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
 	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
 
 	return cpu_addr;
@@ -482,10 +452,10 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
 				    unsigned long attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
-	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 
-	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
+	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
 	      dma_addr, size, __builtin_return_address(0));
 }
 
@@ -626,7 +596,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 		while (sg+1 < end && (int) sg[1].dma_address == -1) {
 			size += sg[1].length;
-			sg++;
+			sg = sg_next(sg);
 		}
 
 		npages = iommu_num_pages(paddr, size, PAGE_SIZE);
@@ -661,17 +631,18 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t max_dma;
 	int dac_allowed;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
 	/* Fast path single entry scatterlists.  */
 	if (nents == 1) {
 		sg->dma_length = sg->length;
-		sg->dma_address
-		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
-				     sg->length, dac_allowed);
-		return sg->dma_address != DMA_MAPPING_ERROR;
+		sg->dma_address = pci_map_single_1(pdev, sg_phys(sg),
+				sg->length, dac_allowed);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return -EIO;
+		return 1;
 	}
 
 	start = sg;
@@ -707,8 +678,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (out < end)
 		out->dma_length = 0;
 
-	if (out - start == 0)
+	if (out - start == 0) {
 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+		return -ENOMEM;
+	}
 	DBGA("pci_map_sg: %ld entries\n", out - start);
 
 	return out - start;
@@ -720,8 +693,8 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	/* Some allocation failed while mapping the scatterlist
 	   entries.  Unmap them now.  */
 	if (out > start)
-		pci_unmap_sg(pdev, start, out - start, dir);
-	return 0;
+		dma_unmap_sg(&pdev->dev, start, out - start, dir);
+	return -ENOMEM;
 }
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
@@ -740,7 +713,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t max_dma;
 	dma_addr_t fbeg, fend;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	if (! alpha_mv.mv_pci_tbi)
 		return;
@@ -938,10 +911,14 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
 const struct dma_map_ops alpha_pci_ops = {
 	.alloc			= alpha_pci_alloc_coherent,
 	.free			= alpha_pci_free_coherent,
-	.map_page		= alpha_pci_map_page,
-	.unmap_page		= alpha_pci_unmap_page,
+	.map_phys		= alpha_pci_map_phys,
+	.unmap_phys		= alpha_pci_unmap_phys,
 	.map_sg			= alpha_pci_map_sg,
 	.unmap_sg		= alpha_pci_unmap_sg,
 	.dma_supported		= alpha_pci_supported,
+	.mmap			= dma_common_mmap,
+	.get_sgtable		= dma_common_get_sgtable,
+	.alloc_pages_op		= dma_common_alloc_pages,
+	.free_pages		= dma_common_free_pages,
 };
 EXPORT_SYMBOL(alpha_pci_ops);
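
For orientation, a minimal driver-side sketch (not part of the patch) of how a streaming mapping reaches the converted callbacks: dma_map_single() resolves the buffer to a physical address in the DMA API core and dispatches to the .map_phys hook registered in alpha_pci_ops (alpha_pci_map_phys above), which still funnels into pci_map_single_1(). The example_map_buffer() helper and its parameters are hypothetical; dma_map_single(), dma_mapping_error() and dma_unmap_single() are the standard DMA API calls.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helper: stream "buf" to a PCI device once. */
static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* On alpha this dispatches through the DMA API core to
	   alpha_pci_ops.map_phys (alpha_pci_map_phys -> pci_map_single_1). */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -EIO;

	/* ... program the device with "handle" and run the transfer ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}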
