Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d4d082c5c2d4..b438fc4fb77b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -18,9 +18,11 @@
#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
+#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -243,6 +245,10 @@ static struct mem_type mem_types[] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_NONCACHED] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
};

const struct mem_type *get_mem_type(unsigned int type)
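Note: the new entry starts out as a bare writable section (PMD_TYPE_SECT | PMD_SECT_AP_WRITE); the bits that actually make it non-cacheable are chosen per-architecture in build_mem_type_table() in the next hunk. As a rough usage sketch (not part of this patch; the board name and addresses are made up), a machine's map_io callback could request such a window through iotable_init():

#include <asm/mach/map.h>
#include <asm/sizes.h>

/* Hypothetical: map 1MB of on-chip SRAM as Normal, non-cacheable. */
static struct map_desc sram_io_desc __initdata = {
	.virtual	= 0xfe000000,			/* made-up virtual base */
	.pfn		= __phys_to_pfn(0x40200000),	/* made-up physical base */
	.length		= SZ_1M,
	.type		= MT_MEMORY_NONCACHED,
};

static void __init myboard_map_io(void)
{
	iotable_init(&sram_io_desc, 1);
}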
@@ -406,9 +412,28 @@ static void __init build_mem_type_table(void)
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
}

+ /*
+ * Non-cacheable Normal - intended for memory areas that must
+ * not cause dirty cache line writebacks when used
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /* Non-cacheable Normal is XCB = 001 */
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ PMD_SECT_BUFFERED;
+ } else {
+ /* For both ARMv6 and non-TEX-remapping ARMv7 */
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ PMD_SECT_TEX(1);
+ }
+ } else {
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
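Note: the cascade above picks one of three encodings of "Normal, non-cacheable" for the first-level section descriptor. With ARMv7 TEX remapping enabled (SCTLR.TRE set), the descriptor must carry XCB = 001 (PMD_SECT_BUFFERED) and the real attributes come from the PRRR/NMRR remap registers; plain ARMv6, or ARMv7 without remapping, uses the architected TEX=1 C=0 B=0 encoding; pre-v6 cores have no Normal non-cacheable type, so plain "bufferable" is the closest approximation. A condensed restatement, illustration only (the helper name is mine; macros per the 2.6.30-era asm headers):

#include <asm/pgtable-hwdef.h>
#include <asm/system.h>		/* CPU_ARCH_*, CR_TRE on kernels of this era */

/* Hypothetical helper mirroring the cascade above: extra section
 * descriptor bits that mark a mapping as Normal, non-cacheable. */
static unsigned long noncached_sect_flags(int cpu_arch, unsigned long cr)
{
	if (cpu_arch < CPU_ARCH_ARMv6)
		return PMD_SECT_BUFFERABLE;		/* C=0, B=1 */
	if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE))
		return PMD_SECT_BUFFERED;		/* XCB = 001 */
	return PMD_SECT_TEX(1);				/* TEX=1, C=0, B=0 */
}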
@@ -677,6 +702,10 @@ static void __init sanity_check_meminfo(void)
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring high memory\n");
+ } else if (cache_is_vipt_aliasing()) {
+ printk(KERN_CRIT "HIGHMEM is not yet supported "
+ "with VIPT aliasing cache, "
+ "ignoring high memory\n");
} else {
memmove(bank + 1, bank,
(meminfo.nr_banks - i) * sizeof(*bank));
@@ -694,7 +723,7 @@ static void __init sanity_check_meminfo(void)
* the vmalloc area.
*/
if (__va(bank->start) >= VMALLOC_MIN ||
- __va(bank->start) < PAGE_OFFSET) {
+ __va(bank->start) < (void *)PAGE_OFFSET) {
printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
"(vmalloc region overlap).\n",
bank->start, bank->start + bank->size - 1);
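Note: the one changed line in this hunk is a type fix, not a behaviour change. __va() evaluates to a pointer while PAGE_OFFSET is a bare integer constant, so the old comparison mixed 'void *' with 'unsigned long', which gcc flags as a pointer/integer comparison. A standalone sketch of the mismatch (illustrative definitions, not the kernel's):

#define PAGE_OFFSET	0xc0000000UL			/* integer constant */
#define __va(x)		((void *)((unsigned long)(x) + 0x40000000UL))

/* With the (void *) cast added by the patch, both operands of the
 * comparison are pointers and the expression is well-formed. */
static int below_page_offset(unsigned long start)
{
	return __va(start) < (void *)PAGE_OFFSET;
}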
@@ -895,6 +924,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
flush_cache_all();
}

+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+ pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+ BUG_ON(!pmd_none(*pmd) || !pte);
+ __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+ pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
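Note: kmap_init() above pre-populates the page table for the 2MB pkmap region at boot. In the 2.6.30-era ARM page-table scheme, the 2 * PTRS_PER_PTE * sizeof(pte_t) allocation is one 4KB page holding the two 1KB hardware page tables in the lower half (installed into the pmd pair by __pmd_populate() to cover PKMAP_BASE) and their Linux shadow entries in the upper half, which is why pkmap_page_table points PTRS_PER_PTE entries in. The BUG_ON() catches a pkmap area that is already mapped, or a failed bootmem allocation. With this in place, highmem pages become reachable through the normal kmap interface; a hypothetical caller (sketch only, not from this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative helper: copy out of a page that may live in highmem.
 * kmap() may sleep; for a highmem page it installs a temporary entry
 * in the pkmap_page_table set up above. */
static void copy_from_any_page(struct page *page, void *dst, size_t len)
{
	char *src = kmap(page);

	memcpy(dst, src, len);
	kunmap(page);
}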
@@ -908,6 +948,7 @@ void __init paging_init(struct machine_desc *mdesc)
prepare_page_table();
bootmem_init();
devicemaps_init(mdesc);
+ kmap_init();

top_pmd = pmd_off_k(0xffff0000);