path: root/arch/powerpc/mm/ioremap_32.c
author		Christophe Leroy <christophe.leroy@c-s.fr>	2019-08-20 14:07:17 +0000
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-08-27 13:03:35 +1000
commit		f381d5711f091facd8847a54a2377cc0d1519df2 (patch)
tree		c187a3aa9f280cafa727be14d92406682ba972a0 /arch/powerpc/mm/ioremap_32.c
parent		7cd9b317b630683b0c8eb2dfcfb046003ad6b97b (diff)
powerpc/mm: Move ioremap functions out of pgtable_32/64.c
Create ioremap_32.c and ioremap_64.c and move the respective ioremap
functions out of pgtable_32.c and pgtable_64.c. In the meantime, fix a
few comments and change a printk() to pr_warn(). Also fix a few
oversplit lines.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b5c8b02ccefd4ede64c61b53cf64fb5dacb35740.1566309263.git.christophe.leroy@c-s.fr
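The printk() to pr_warn() conversion mentioned above shows up in the new
__ioremap_caller() below. A minimal sketch of what such a conversion
typically looks like follows; the "before" form and the helper name are
assumptions for illustration, not quotes of the removed pgtable_32.c code:

#include <linux/printk.h>

/* example_warn_ram_overlap() is a hypothetical helper; only the logging
 * style is the point here.
 */
static void example_warn_ram_overlap(unsigned long long p, void *lr)
{
	/* before (assumed): printk() with an explicit level and a
	 * hard-coded function name
	 */
	printk(KERN_WARNING "__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
	       p, lr);

	/* after: the pr_warn() helper, with __func__ supplying the name */
	pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__, p, lr);
}

The pr_warn() form is shorter, and using __func__ keeps the message
correct if the function is renamed, which matters here since the code
just moved files.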
Diffstat (limited to 'arch/powerpc/mm/ioremap_32.c')
-rw-r--r--	arch/powerpc/mm/ioremap_32.c	104
1 file changed, 104 insertions, 0 deletions
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
new file mode 100644
index 000000000000..fb43ba71aa54
--- /dev/null
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <mm/mmu_decl.h>
+
+void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
+{
+	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
+
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
+{
+	unsigned long v, i;
+	phys_addr_t p;
+	int err;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before then, we use space going down from IOREMAP_TOP
+	 * (ioremap_bot records where we're up to).
+	 */
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - p;
+
+	/*
+	 * If the address lies within the first 16 MB, assume it's in ISA
+	 * memory space
+	 */
+	if (p < 16 * 1024 * 1024)
+		p += _ISA_MEM_BASE;
+
+#ifndef CONFIG_CRASH_DUMP
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 */
+	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
+	    page_is_ram(__phys_to_pfn(p))) {
+		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
+			(unsigned long long)p, __builtin_return_address(0));
+		return NULL;
+	}
+#endif
+
+	if (size == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped? Perhaps overlapped by a previous
+	 * mapping.
+	 */
+	v = p_block_mapped(p);
+	if (v)
+		goto out;
+
+	if (slab_is_available()) {
+		struct vm_struct *area;
+		area = get_vm_area_caller(size, VM_IOREMAP, caller);
+		if (area == NULL)
+			return NULL;
+		area->phys_addr = p;
+		v = (unsigned long)area->addr;
+	} else {
+		v = (ioremap_bot -= size);
+	}
+
+	/*
+	 * Should check if it is a candidate for a BAT mapping
+	 */
+
+	err = 0;
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+		err = map_kernel_page(v + i, p + i, prot);
+	if (err) {
+		if (slab_is_available())
+			vunmap((void *)v);
+		return NULL;
+	}
+
+out:
+	return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+	/*
+	 * If mapped by BATs then there is nothing to do.
+	 * Calling vfree() generates a benign warning.
+	 */
+	if (v_block_mapped((unsigned long)addr))
+		return;
+
+	if (addr > high_memory && (unsigned long)addr < ioremap_bot)
+		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+}
+EXPORT_SYMBOL(iounmap);
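
For context, a minimal sketch of how a 32-bit powerpc driver might pair
the two exported calls above; the function name, base address, and size
are invented for illustration. The comments walk through the page
rounding done by __ioremap_caller() (p = addr & PAGE_MASK; size =
PAGE_ALIGN(addr + size) - p):

#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical register block; the base is deliberately not
 * page-aligned to show the rounding.
 */
#define EXAMPLE_PHYS_BASE	0xf0001003UL
#define EXAMPLE_REG_SIZE	0x10UL

static int example_map_regs(void)
{
	void __iomem *regs;

	/*
	 * With 4 KB pages, __ioremap_caller() computes
	 *   p    = 0xf0001003 & PAGE_MASK     = 0xf0001000
	 *   size = PAGE_ALIGN(0xf0001013) - p = 0x1000
	 * so one page is mapped and the returned pointer carries the
	 * 0x3 in-page offset.
	 */
	regs = ioremap_wt(EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	writeb(0x1, regs);	/* access the device through the mapping */

	iounmap(regs);		/* vunmap()s the page unless BAT-mapped */
	return 0;
}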