Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/iommu.c                 30
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c             31
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c           6
-rw-r--r--  arch/sparc/kernel/syscalls/syscall.tbl     1
4 files changed, 42 insertions, 26 deletions
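
The bulk of this diff converts the sun4u and sun4v IOMMU drivers from the page-based .map_page/.unmap_page callbacks to the physical-address-based .map_phys/.unmap_phys callbacks, drops the base_paddr/__pa() bookkeeping the page interface required, and rejects DMA_ATTR_MMIO requests up front. The remaining hunks drop the leading mm argument from the mm_get_unmapped_area() calls in sys_sparc_64.c and add the listns entry to the syscall table. A minimal sketch of how the two mapping conventions relate, assuming the generic page_to_phys() helper (which is not part of this diff):

/* Sketch only: the phys_addr_t taken by .map_phys carries exactly what
 * the old page + offset pair expressed.  "ops" stands for one of the
 * dma_map_ops tables updated below. */
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma = ops->map_phys(dev, phys, sz, direction, attrs);
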
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index da0363692528..46ef88bc9c26 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -260,26 +260,35 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t sz,
- enum dma_data_direction direction,
+static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
+ size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr, ctx;
+ unsigned long i, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ /*
+ * This check is included because older versions of the code
+ * lacked MMIO path support, and my ability to test this path
+ * is limited. However, from a software technical standpoint,
+ * there is no restriction, as the following code operates
+ * solely on physical addresses.
+ */
+ goto bad_no_ctx;
+
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- oaddr = (unsigned long)(page_address(page) + offset);
+ oaddr = (unsigned long)(phys_to_virt(phys));
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -296,7 +305,6 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -304,8 +312,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
- for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
- iopte_val(*base) = iopte_protection | base_paddr;
+ for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
+ iopte_val(*base) = iopte_protection | phys;
return ret;
@@ -383,7 +391,7 @@ do_flush_sync:
vaddr, ctx, npages);
}
-static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -753,8 +761,8 @@ static int dma_4u_supported(struct device *dev, u64 device_mask)
static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
- .map_page = dma_4u_map_page,
- .unmap_page = dma_4u_unmap_page,
+ .map_phys = dma_4u_map_phys,
+ .unmap_phys = dma_4u_unmap_phys,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
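
dma_4u_map_phys() still sizes the mapping the same way dma_4u_map_page() did: it rounds the buffer out to whole IO pages and counts them as npages = (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT. A small standalone illustration of that arithmetic follows; the 8 KiB IO page size is assumed purely for the example, the real constants come from the sparc headers, not this diff.

#include <stdio.h>

#define IO_PAGE_SHIFT 13                     /* assumed 8 KiB IO pages, illustration only */
#define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

int main(void)
{
	unsigned long oaddr = 0x10000UL + 0x1f00UL;  /* buffer starts 0x1f00 into an IO page */
	unsigned long sz = 0x2200UL;                 /* and spans an IO page boundary */
	unsigned long npages = (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
				>> IO_PAGE_SHIFT;

	printf("npages = %lu\n", npages);            /* 3 IO pages for this buffer */
	return 0;
}
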
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b720b21ccfbd..791f0a76665f 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -352,9 +352,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t sz,
- enum dma_data_direction direction,
+static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
+ size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
@@ -362,18 +361,27 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
struct iommu_map_table *tbl;
u64 mask;
unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr;
- unsigned long prot;
+ unsigned long i, prot;
dma_addr_t bus_addr, ret;
long entry;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ /*
+ * This check is included because older versions of the code
+ * lacked MMIO path support, and my ability to test this path
+ * is limited. However, from a software technical standpoint,
+ * there is no restriction, as the following code operates
+ * solely on physical addresses.
+ */
+ goto bad;
+
iommu = dev->archdata.iommu;
atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
- oaddr = (unsigned long)(page_address(page) + offset);
+ oaddr = (unsigned long)(phys_to_virt(phys));
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -391,7 +399,6 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
@@ -403,8 +410,8 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
iommu_batch_start(dev, prot, entry);
- for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = iommu_batch_add(base_paddr, mask);
+ for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
+ long err = iommu_batch_add(phys, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
@@ -426,7 +433,7 @@ iommu_map_fail:
return DMA_MAPPING_ERROR;
}
-static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -686,8 +693,8 @@ static int dma_4v_supported(struct device *dev, u64 device_mask)
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
- .map_page = dma_4v_map_page,
- .unmap_page = dma_4v_unmap_page,
+ .map_phys = dma_4v_map_phys,
+ .unmap_phys = dma_4v_unmap_phys,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.dma_supported = dma_4v_supported,
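
Both drivers keep composing the returned DMA handle as before the change: the IOMMU (or, here, the hypervisor-backed table) hands back an IO-page-aligned bus address, and the intra-page offset of the original buffer is OR-ed back in via ret = bus_addr | (oaddr & ~IO_PAGE_MASK). A toy illustration with made-up addresses and an assumed 8 KiB IO page size:

#include <stdio.h>

#define IO_PAGE_SHIFT 13                     /* assumption for illustration only */
#define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long oaddr = 0x11f00UL;         /* CPU address of the buffer */
	unsigned long bus_addr = 0xc0000000UL;   /* IO-page-aligned address from the IOMMU */
	unsigned long ret = bus_addr | (oaddr & ~IO_PAGE_MASK);

	printf("dma handle = %#lx\n", ret);      /* 0xc0001f00: offset 0x1f00 preserved */
	return 0;
}
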
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 55faf2effa46..dbf118b40601 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -241,7 +241,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
+ return mm_get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
@@ -254,7 +254,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
align_goal = (64UL * 1024);
do {
- addr = mm_get_unmapped_area(current->mm, NULL, orig_addr,
+ addr = mm_get_unmapped_area(NULL, orig_addr,
len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
@@ -273,7 +273,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
+ addr = mm_get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
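
The sys_sparc_64.c hunks only drop the leading mm argument from the mm_get_unmapped_area() calls; the framebuffer alignment trick around them is untouched: get_fb_unmapped_area() asks for len + (align_goal - PAGE_SIZE) bytes and then rounds the returned address up to align_goal, so an aligned block of len bytes always fits inside the mapping. A toy illustration of that align-up step with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long align_goal = 64UL * 1024;          /* one of the goals the loop tries */
	unsigned long addr = 0x7f0000003000UL;           /* hypothetical allocator result */
	unsigned long aligned = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);

	printf("aligned = %#lx\n", aligned);             /* 0x7f0000010000 */
	return 0;
}
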
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index ebb7d06d1044..39aa26b6a50b 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -515,3 +515,4 @@
467 common open_tree_attr sys_open_tree_attr
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
+470 common listns sys_listns