Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--  arch/xtensa/mm/Makefile     |  5
-rw-r--r--  arch/xtensa/mm/cache.c      |  3
-rw-r--r--  arch/xtensa/mm/fault.c      | 22
-rw-r--r--  arch/xtensa/mm/init.c       | 36
-rw-r--r--  arch/xtensa/mm/kasan_init.c | 95
-rw-r--r--  arch/xtensa/mm/mmu.c        | 31
-rw-r--r--  arch/xtensa/mm/tlb.c        |  6
7 files changed, 157 insertions, 41 deletions
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296a016a..734888a00dc8 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@
obj-y := init.o misc.o
obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
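
Note on the KASAN_SANITIZE lines above: kasan_init.o and mmu.o run before the shadow map exists, and fault.o services the faults that instrumented accesses can raise, so compiling them with instrumentation would crash or recurse. As a rough illustration (a sketch, not part of the patch; example_store() is hypothetical), every instrumented memory access is conceptually preceded by a shadow check such as kasan_check_write():

#include <linux/kasan-checks.h>

/* Hypothetical example: instrumentation effectively inserts a shadow
 * check like this before every store; it can only work once the shadow
 * map built by kasan_init.c is in place.
 */
static void example_store(int *p)
{
	kasan_check_write(p, sizeof(*p));
	*p = 0;
}
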
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3c75c4e597da..57dc231a0709 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -33,9 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-//#define printd(x...) printk(x)
-#define printd(x...) do { } while(0)
-
/*
* Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index a14df5aa98c8..8b9b6f44bb06 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -25,8 +25,6 @@
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
-#undef DEBUG_PAGE_FAULT
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
-#ifdef DEBUG_PAGE_FAULT
- printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
- address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
-#endif
+ pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+ current->comm, current->pid,
+ address, exccause, regs->pc,
+ is_write ? "w" : "", is_exec ? "x" : "");
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#ifdef DEBUG_PAGE_FAULT
- printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
- current->comm, regs->pc, entry->fixup);
-#endif
+ pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
current->thread.bad_uaddr = address;
regs->pc = entry->fixup;
return;
@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
- "address %08lx\n pc = %08lx, ra = %08lx\n",
- address, regs->pc, regs->areg[0]);
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
die("Oops", regs, sig);
do_exit(sig);
}
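
The conversions above replace the ad-hoc DEBUG_PAGE_FAULT printk()s with pr_debug()/pr_alert(). For context (a sketch, not part of the patch; example() is hypothetical), pr_debug() compiles to nothing unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, so the fault trace stays silent by default:

/* Hypothetical example: defining DEBUG before including the printk
 * header makes pr_debug() in this translation unit emit output at
 * KERN_DEBUG level (or default-enable the site under dynamic debug).
 */
#define DEBUG
#include <linux/printk.h>

static void example(void)
{
	pr_debug("visible only with DEBUG or dynamic debug enabled\n");
}
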
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 720fe4e8b497..d776ec0d7b22 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,29 +100,51 @@ void __init mem_init(void)
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
-#ifdef CONFIG_MMU
- " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+ KASAN_SHADOW_SIZE >> 20,
#endif
- " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_MMU
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
-#ifdef CONFIG_MMU
- VMALLOC_START, VMALLOC_END,
- (VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
- ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+ (unsigned long)_text, (unsigned long)_etext,
+ (unsigned long)(_etext - _text) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)_sdata,
+ (unsigned long)(_sdata - __start_rodata) >> 10,
+ (unsigned long)_sdata, (unsigned long)_edata,
+ (unsigned long)(_edata - _sdata) >> 10,
+ (unsigned long)__init_begin, (unsigned long)__init_end,
+ (unsigned long)(__init_end - __init_begin) >> 10,
+ (unsigned long)__bss_start, (unsigned long)__bss_stop,
+ (unsigned long)(__bss_stop - __bss_start) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000000..6b532b6bd785
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+ unsigned long vaddr = KASAN_SHADOW_START;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+ for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+ }
+ early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long n_pages = (end - start) / PAGE_SIZE;
+ unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+ unsigned long i, j;
+ unsigned long vaddr = (unsigned long)start;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ pr_debug("%s: %p - %p\n", __func__, start, end);
+
+ for (i = j = 0; i < n_pmds; ++i) {
+ int k;
+
+ for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+ phys_addr_t phys =
+ memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+ MEMBLOCK_ALLOC_ANYWHERE);
+
+ set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+ }
+
+ for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+ set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ int i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+ (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+ BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+ /*
+ * Replace shadow map pages that cover addresses from VMALLOC area
+ * start to the end of KSEG with clean writable pages.
+ */
+ populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+ /* Write protect kasan_zero_page and zero-initialize it again. */
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+ local_flush_tlb_all();
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ /* At this point kasan is fully initialized. Enable error messages. */
+ current->kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
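
For context (a sketch, not part of the patch): the BUILD_BUG_ON() checks in kasan_init() encode the generic KASAN shadow mapping, in which each 8-byte granule of kernel address space is described by one shadow byte at a fixed offset, so KASAN_SHADOW_START must equal the shadow address of KASAN_START_VADDR. The generic helper looks roughly like this (cf. include/linux/kasan.h); example_mem_to_shadow() is a hypothetical stand-in:

#include <linux/kasan.h>

/* Roughly how generic KASAN maps an address to its shadow byte:
 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
 * with KASAN_SHADOW_SCALE_SHIFT == 3 (one shadow byte per 8 bytes).
 */
static inline void *example_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
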
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 358d748d9083..9d1ecfc53670 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -56,7 +56,6 @@ static void __init fixedrange_init(void)
void __init paging_init(void)
{
- memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
@@ -82,6 +81,23 @@ void init_mmu(void)
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
+ init_kio();
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
@@ -95,17 +111,4 @@ void init_mmu(void)
write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6);
#endif
-
- local_flush_tlb_all();
-
- /* Set rasid register to a known value. */
-
- set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
-
- /* Set PTEVADDR special register to the start of the page
- * table, which is in kernel mappable space (ie. not
- * statically mapped). This register's value is undefined on
- * reset.
- */
- set_ptevaddr_register(PGTABLE_START);
}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 35c822286bbe..59153d0aa890 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
if (mm->context.asid[cpu] == NO_CONTEXT)
return;
-#if 0
- printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
- (unsigned long)mm->context.asid[cpu], start, end);
-#endif
+ pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context.asid[cpu], start, end);
local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {