path: root/arch/powerpc/mm/8xx_mmu.c
author     Christophe Leroy <christophe.leroy@c-s.fr>    2016-02-09 17:07:50 +0100
committer  Scott Wood <oss@buserror.net>                 2016-03-11 17:18:01 -0600
commit     a372acfac51e0d5858f8f6f84da52defcabf054b (patch)
tree       5e7c1a109e7c1167f1638f9e6f7f0154e09d2eef /arch/powerpc/mm/8xx_mmu.c
parent     913a6b3d10d85a032eff2f31254c35e0976f5e32 (diff)
powerpc/8xx: Map linear kernel RAM with 8M pages
On a live running system (a VoIP gateway for Air Traffic Control), over a 10 minute period (with 277s idle), we get 87 million DTLB misses and approximately 35 seconds are spent in the DTLB handler. This represents 5.8% of the overall time and even 10.8% of the non-idle time. Among those 87 million DTLB misses, 15% are on user addresses and 85% are on kernel addresses. And within the kernel addresses, 93% are on addresses from the linear address space and only 7% are on addresses from the virtual address space.

The MPC8xx has no BATs, but it has an 8M page size. This patch implements mapping of kernel RAM using 8M pages, on the same model as what is done on the 40x.

In 4k pages mode, each PGD entry maps a 4M area: we map every two entries to the same 8M physical page. In each second entry, we add 4M to the page physical address to ease the life of the FixupDAR routine. This is just ignored by the HW.

In 16k pages mode, each PGD entry maps a 64M area: each PGD entry will point to the first page of the area. The DTLB handler adds the 3 bits from the EPN to map the correct page.

With this patch applied, we now get only 13 million TLB misses during the 10 minute period. The idle time has increased to 313s and the overall time spent in the DTLB miss handler is 6.3s, which represents 1% of the overall time and 2.2% of the non-idle time.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
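A minimal user-space sketch of the 4k-pages scheme described above, under the assumption of a toy 8-entry table and a placeholder PS8MEG_FLAG value; it only models the "two consecutive 4M entries per 8M physical page, second entry offset by 4M" arithmetic, not the kernel code in the diff below:

/*
 * Simplified model (not kernel code): in 4k pages mode each PMD entry
 * covers a 4M area, so every 8M hardware page is written into two
 * consecutive entries, the second one advanced by 4M.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_4M		(1UL << 22)
#define SZ_8M		(1UL << 23)
#define PS8MEG_FLAG	0x800UL		/* placeholder standing in for MD_PS8MEG */

int main(void)
{
	uint32_t pmd[8] = { 0 };	/* toy table: 8 entries cover 32M of VA */
	uint32_t phys = 0;
	int i;

	/* Two consecutive 4M entries per 8M physical page. */
	for (i = 0; i + 1 < 8; i += 2, phys += SZ_8M) {
		pmd[i]     = phys | PS8MEG_FLAG;
		pmd[i + 1] = (phys + SZ_4M) | PS8MEG_FLAG;
	}

	for (i = 0; i < 8; i++)
		printf("pmd[%d] = 0x%08x\n", i, (unsigned)pmd[i]);

	return 0;
}

Running it prints pairs such as pmd[0] = 0x00000800 and pmd[1] = 0x00400800, showing the second entry of each pair carrying the +4M physical offset that FixupDAR relies on while the hardware simply ignores it.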
Diffstat (limited to 'arch/powerpc/mm/8xx_mmu.c')
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c  83
1 file changed, 83 insertions, 0 deletions
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
new file mode 100644
index 000000000000..2d42745f6cff
--- /dev/null
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -0,0 +1,83 @@
+/*
+ * This file contains the routines for initializing the MMU
+ * on the 8xx series of chips.
+ * -- christophe
+ *
+ * Derived from arch/powerpc/mm/40x_mmu.c:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/memblock.h>
+
+#include "mmu_decl.h"
+
+extern int __map_without_ltlbs;
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+ /* Nothing to do for the time being but keep it similar to other PPC */
+}
+
+#define LARGE_PAGE_SIZE_4M (1<<22)
+#define LARGE_PAGE_SIZE_8M (1<<23)
+#define LARGE_PAGE_SIZE_64M (1<<26)
+
+unsigned long __init mmu_mapin_ram(unsigned long top)
+{
+ unsigned long v, s, mapped;
+ phys_addr_t p;
+
+ v = KERNELBASE;
+ p = 0;
+ s = top;
+
+ if (__map_without_ltlbs)
+ return 0;
+
+#ifdef CONFIG_PPC_4K_PAGES
+ while (s >= LARGE_PAGE_SIZE_8M) {
+ pmd_t *pmdp;
+ unsigned long val = p | MD_PS8MEG;
+
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ *pmdp++ = __pmd(val);
+ *pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);
+
+ v += LARGE_PAGE_SIZE_8M;
+ p += LARGE_PAGE_SIZE_8M;
+ s -= LARGE_PAGE_SIZE_8M;
+ }
+#else /* CONFIG_PPC_16K_PAGES */
+ while (s >= LARGE_PAGE_SIZE_64M) {
+ pmd_t *pmdp;
+ unsigned long val = p | MD_PS8MEG;
+
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ *pmdp++ = __pmd(val);
+
+ v += LARGE_PAGE_SIZE_64M;
+ p += LARGE_PAGE_SIZE_64M;
+ s -= LARGE_PAGE_SIZE_64M;
+ }
+#endif
+
+ mapped = top - s;
+
+ /* If the size of RAM is not an exact power of two, we may not
+ * have covered RAM in its entirety with 8 MiB
+ * pages. Consequently, restrict the top end of RAM currently
+ * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
+ * coverage with normal-sized pages (or other reasons) do not
+ * attempt to allocate outside the allowed range.
+ */
+ memblock_set_current_limit(mapped);
+
+ return mapped;
+}
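The 16k-pages branch above stores only the first 8M page of each 64M area in the PGD entry; the commit message says the DTLB miss handler then adds 3 bits from the EPN to select the correct page. The following is a conceptual user-space sketch of that selection arithmetic only, with a hypothetical helper name (resolve_8m_page) and an example address, not the actual handler code:

/*
 * Conceptual sketch (assumption, not the real DTLB miss handler): the PGD
 * entry gives the physical base of a 64M area, and bits 25..23 of the
 * effective address pick which of the eight 8M pages inside it is hit.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_8M	(1UL << 23)

/* Hypothetical helper: area base plus the 8M slice selected by the
 * "3 bits from the EPN" mentioned in the commit message. */
static uint32_t resolve_8m_page(uint32_t area_phys_base, uint32_t ea)
{
	uint32_t slice = (ea >> 23) & 0x7;

	return area_phys_base + slice * SZ_8M;
}

int main(void)
{
	uint32_t base = 0x00000000;	/* first 8M page of a 64M-aligned area */
	uint32_t ea = 0xc1700000;	/* example kernel effective address */

	printf("8M page base: 0x%08x\n", (unsigned)resolve_8m_page(base, ea));
	return 0;
}

For the example address the selected slice is 2, so the sketch prints 0x01000000, i.e. the third 8M page of the area, which is why a single PGD entry per 64M area is sufficient in 16k pages mode.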