Diffstat (limited to 'arch/alpha/kernel/pci_iommu.c')
-rw-r--r--  arch/alpha/kernel/pci_iommu.c  85
1 file changed, 27 insertions(+), 58 deletions(-)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 7f1925a32c99..dc91de50f906 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -11,8 +11,9 @@
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>
+#include <linux/string_choices.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
@@ -71,43 +72,8 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
if (align < mem_size)
align = mem_size;
-
-#ifdef CONFIG_DISCONTIGMEM
-
- arena = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena) {
- printk("%s: couldn't allocate arena from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- }
-
- arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena->ptes) {
- printk("%s: couldn't allocate arena ptes from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
- }
-
-#else /* CONFIG_DISCONTIGMEM */
-
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
-
-#endif /* CONFIG_DISCONTIGMEM */
+ arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES);
+ arena->ptes = memblock_alloc_or_panic(mem_size, align);
spin_lock_init(&arena->lock);
arena->hose = hose;
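/*
 * A minimal sketch of the memblock_alloc_or_panic() helper used above,
 * assuming it simply folds the alloc-then-panic pattern that both removed
 * CONFIG_DISCONTIGMEM branches open-coded (the exact message format in
 * <linux/memblock.h> may differ; the _sketch suffix is hypothetical):
 */
static inline void *memblock_alloc_or_panic_sketch(phys_addr_t size,
						   phys_addr_t align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("Failed to allocate %llu bytes\n",
		      (unsigned long long)size);
	return ptr;
}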
@@ -141,12 +107,7 @@ iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
unsigned long boundary_size;
base = arena->dma_base >> PAGE_SHIFT;
- if (dev) {
- boundary_size = dma_get_seg_boundary(dev) + 1;
- boundary_size >>= PAGE_SHIFT;
- } else {
- boundary_size = 1UL << (32 - PAGE_SHIFT);
- }
+ boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
/* Search forward for the first mask-aligned sequence of N free ptes */
ptes = arena->ptes;
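/*
 * dma_get_seg_boundary_nr_pages() replaces the open-coded computation above.
 * A sketch of what it is assumed to return: the device's segment boundary
 * converted to a page count, with a 4 GiB default when no device is supplied,
 * matching the old "else" branch (the real helper lives in
 * <linux/dma-mapping.h>; the _sketch name is hypothetical):
 */
static inline unsigned long
dma_get_seg_boundary_nr_pages_sketch(struct device *dev,
				     unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;	/* 1UL << (32 - page_shift) */
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}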
@@ -161,10 +122,12 @@ again:
goto again;
}
- if (ptes[p+i])
- p = ALIGN(p + i + 1, mask + 1), i = 0;
- else
+ if (ptes[p+i]) {
+ p = ALIGN(p + i + 1, mask + 1);
+ i = 0;
+ } else {
i = i + 1;
+ }
}
if (i < n) {
@@ -250,7 +213,7 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
/* If both conditions above are met, we are fine. */
DBGA("pci_dac_dma_supported %s from %ps\n",
- ok ? "yes" : "no", __builtin_return_address(0));
+ str_yes_no(ok), __builtin_return_address(0));
return ok;
}
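/*
 * str_yes_no() comes from the newly included <linux/string_choices.h>; it is
 * just the open-coded ternary given a name, roughly:
 */
static inline const char *str_yes_no_sketch(bool v)
{
	return v ? "yes" : "no";
}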
@@ -367,7 +330,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
int dac_allowed;
- BUG_ON(dir == PCI_DMA_NONE);
+ BUG_ON(dir == DMA_NONE);
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
@@ -390,7 +353,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
struct pci_iommu_arena *arena;
long dma_ofs, npages;
- BUG_ON(dir == PCI_DMA_NONE);
+ BUG_ON(dir == DMA_NONE);
if (dma_addr >= __direct_map_base
&& dma_addr < __direct_map_base + __direct_map_size) {
@@ -494,7 +457,7 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
- pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
free_pages((unsigned long)cpu_addr, get_order(size));
DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
@@ -638,7 +601,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
while (sg+1 < end && (int) sg[1].dma_address == -1) {
size += sg[1].length;
- sg++;
+ sg = sg_next(sg);
}
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
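/*
 * sg_next() is used instead of plain "sg++" because a scatterlist can be
 * chained across several arrays, in which case the next entry is not simply
 * the next array element.  A sketch of the traversal it is assumed to do
 * (see lib/scatterlist.c for the real thing; the _sketch name is
 * hypothetical):
 */
static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}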
@@ -673,7 +636,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t max_dma;
int dac_allowed;
- BUG_ON(dir == PCI_DMA_NONE);
+ BUG_ON(dir == DMA_NONE);
dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
@@ -683,7 +646,9 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_address
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
sg->length, dac_allowed);
- return sg->dma_address != DMA_MAPPING_ERROR;
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ return -EIO;
+ return 1;
}
start = sg;
@@ -719,8 +684,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
if (out < end)
out->dma_length = 0;
- if (out - start == 0)
+ if (out - start == 0) {
printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+ return -ENOMEM;
+ }
DBGA("pci_map_sg: %ld entries\n", out - start);
return out - start;
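/*
 * The .map_sg error paths above switch from the old "return 0 on failure"
 * convention to the newer dma_map_ops one of returning a negative errno
 * (-EIO for an entry that cannot be mapped, -ENOMEM for an allocation
 * failure).  A sketch of how the DMA core is assumed to translate this back
 * for legacy dma_map_sg() callers, which still expect 0 on error (the name
 * and the direct ->map_sg call are illustrative only):
 */
static unsigned int map_sg_legacy_sketch(struct device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction dir)
{
	int ret = get_dma_ops(dev)->map_sg(dev, sg, nents, dir, 0);

	if (ret < 0)
		return 0;	/* dma_map_sg() reports failure as 0 entries */
	return ret;
}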
@@ -732,8 +699,8 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
/* Some allocation failed while mapping the scatterlist
entries. Unmap them now. */
if (out > start)
- pci_unmap_sg(pdev, start, out - start, dir);
- return 0;
+ dma_unmap_sg(&pdev->dev, start, out - start, dir);
+ return -ENOMEM;
}
/* Unmap a set of streaming mode DMA translations. Again, cpu read
@@ -752,7 +719,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t max_dma;
dma_addr_t fbeg, fend;
- BUG_ON(dir == PCI_DMA_NONE);
+ BUG_ON(dir == DMA_NONE);
if (! alpha_mv.mv_pci_tbi)
return;
@@ -957,5 +924,7 @@ const struct dma_map_ops alpha_pci_ops = {
.dma_supported = alpha_pci_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages_op = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);