path: root/arch/mn10300/mm/pgtable.c
author      David Howells <dhowells@redhat.com>                    2008-02-08 04:19:31 -0800
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-08 09:22:30 -0800
commit      b920de1b77b72ca9432ac3f97edb26541e65e5dd (patch)
tree        40fa9be1470e929c47927dea7eddf184c0204229 /arch/mn10300/mm/pgtable.c
parent      ef3d534754f31fed9c3b976fee1ece1b3bc38282 (diff)
mn10300: add the MN10300/AM33 architecture to the kernel
Add architecture support for the MN10300/AM33 CPUs produced by MEI to the
kernel. This patch also adds board support for the ASB2303 with the ASB2308
daughter board, and the ASB2305. The only processor supported is the
MN103E010, which is an AM33v2 core plus on-chip devices.

[akpm@linux-foundation.org: nuke cvs control strings]
Signed-off-by: Masakazu Urade <urade.masakazu@jp.panasonic.com>
Signed-off-by: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/mn10300/mm/pgtable.c')
-rw-r--r--  arch/mn10300/mm/pgtable.c  197
1 file changed, 197 insertions, 0 deletions
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
new file mode 100644
index 000000000000..a477038752ba
--- /dev/null
+++ b/arch/mn10300/mm/pgtable.c
@@ -0,0 +1,197 @@
+/* MN10300 Page table management
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Modified by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+#include <linux/quicklist.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+void show_mem(void)
+{
+ unsigned long i;
+ int free = 0, total = 0, reserved = 0, shared = 0;
+
+ int cached = 0;
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas();
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map + i))
+ reserved++;
+ else if (PageSwapCache(mem_map + i))
+ cached++;
+ else if (!page_count(mem_map + i))
+ free++;
+ else
+ shared += page_count(mem_map + i) - 1;
+ }
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d free pages\n", free);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+ printk(KERN_INFO "%d pages shared\n", shared);
+ printk(KERN_INFO "%d pages swap cached\n", cached);
+}
+
+/*
+ * Associate a large virtual page frame with a given physical page frame
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned.
+ * The pmd must already be instantiated. Assumes PAE mode.
+ */
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
+ printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
+ return; /* BUG(); */
+ }
+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
+ printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
+ return; /* BUG(); */
+ }
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
+ return; /* BUG(); */
+ }
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ set_pmd(pmd, pfn_pmd(pfn, flags));
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pte)
+ clear_page(pte);
+ return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+#else
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+#endif
+ if (pte)
+ clear_highpage(pte);
+ return pte;
+}
+
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * If the locking proves to be non-performant, a ticketing scheme with
+ * checks at dup_mmap(), exec(), and other mmlist addition points
+ * could be used. The locking scheme was chosen on the basis of
+ * manfred's recommendations and having no core impact whatsoever.
+ * -- wli
+ */
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
+
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+ page->index = (unsigned long) pgd_list;
+ if (pgd_list)
+ set_page_private(pgd_list, (unsigned long) &page->index);
+ pgd_list = page;
+ set_page_private(page, (unsigned long) &pgd_list);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *next, **pprev, *page = virt_to_page(pgd);
+ next = (struct page *) page->index;
+ pprev = (struct page **) page_private(page);
+ *pprev = next;
+ if (next)
+ set_page_private(next, (unsigned long) pprev);
+}
+
+void pgd_ctor(void *pgd)
+{
+ unsigned long flags;
+
+ if (PTRS_PER_PMD == 1)
+ spin_lock_irqsave(&pgd_lock, flags);
+
+ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ if (PTRS_PER_PMD > 1)
+ return;
+
+ pgd_list_add(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+}
+
+/* never called when PTRS_PER_PMD > 1 */
+void pgd_dtor(void *pgd)
+{
+ unsigned long flags; /* can be called from interrupt context */
+
+ spin_lock_irqsave(&pgd_lock, flags);
+ pgd_list_del(pgd);
+ spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ quicklist_free(0, pgd_dtor, pgd);
+}
+
+void __init pgtable_cache_init(void)
+{
+}
+
+void check_pgt_cache(void)
+{
+ quicklist_trim(0, pgd_dtor, 25, 16);
+}
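
A standalone sketch of the alignment contract that set_pmd_pfn() enforces above, using illustrative stand-ins for PMD_SIZE and PTRS_PER_PTE (4 KiB pages, 1024 PTEs per page table, so one pmd entry spans 4 MiB); the real constants come from <asm/pgtable.h>:

/*
 * Illustration only: the preconditions set_pmd_pfn() checks before
 * installing a large-page mapping.  The two constants below are example
 * values, not the real ones from <asm/pgtable.h>.
 */
#include <stdio.h>

#define PMD_SIZE	(4UL * 1024 * 1024)	/* bytes covered by one pmd entry */
#define PTRS_PER_PTE	1024UL			/* pages covered by one pmd entry */

static int pmd_args_aligned(unsigned long vaddr, unsigned long pfn)
{
	if (vaddr & (PMD_SIZE - 1))	/* vaddr must sit on a pmd boundary */
		return 0;
	if (pfn & (PTRS_PER_PTE - 1))	/* pfn must start a full page-table's span */
		return 0;
	return 1;
}

int main(void)
{
	/* 0x90000000 is 4 MiB aligned and pfn 0x40000 is 1024-page aligned */
	printf("%d\n", pmd_args_aligned(0x90000000UL, 0x40000UL));	/* prints 1 */
	/* 0x90001000 is page aligned but not pmd aligned */
	printf("%d\n", pmd_args_aligned(0x90001000UL, 0x40000UL));	/* prints 0 */
	return 0;
}

With these example values, a pfn aligned to PTRS_PER_PTE starts on a 4 MiB physical boundary, matching the 4 MiB of virtual space that one pmd entry covers.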
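The pgd_list_add()/pgd_list_del() helpers above keep every pgd page on a list that can be unlinked in O(1) without a walk: page->index carries the next pointer, and page_private() carries the address of whichever pointer currently refers to the page (the pgd_list head or the previous page's index field). A minimal userspace sketch of the same linking scheme, with struct node and node_list standing in for struct page and pgd_list:

/*
 * Userspace sketch of the linking scheme used by pgd_list_add() and
 * pgd_list_del().  "next" plays the role of page->index and "pprev" plays
 * the role of page_private(page): it holds the address of the pointer that
 * currently points at this node, so deletion needs no list walk.
 */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* stands in for page->index        */
	struct node **pprev;	/* stands in for page_private(page) */
};

static struct node *node_list;	/* stands in for pgd_list */

static void node_list_add(struct node *n)
{
	n->next = node_list;
	if (node_list)
		node_list->pprev = &n->next;
	node_list = n;
	n->pprev = &node_list;
}

static void node_list_del(struct node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = { NULL, NULL }, b = { NULL, NULL };

	node_list_add(&a);
	node_list_add(&b);		/* list is now b -> a */
	node_list_del(&b);		/* unlink the head without walking */
	assert(node_list == &a);
	node_list_del(&a);
	assert(node_list == NULL);
	return 0;
}

Storing the address of the previous link rather than the previous node means removing the head and removing a middle element share one code path, which is why pgd_list_del() needs no special case for pgd_list itself.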