author     Michael Ellerman <mpe@ellerman.id.au>    2016-12-16 15:05:38 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>    2016-12-16 15:05:38 +1100
commit     c6f6634721c871bfab4235e1cbcad208d3063798 (patch)
tree       9cc1d0307b9c5a3a84021419d5f80bea8bbfc49e /arch/powerpc
parent     ff45000fcb56b5b0f1a14a865d3541746d838a0a (diff)
parent     baae856ebdeeaefbadd4a02cdb54b7c2277ff4dd (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include 8xx hugepage support, qbman fixes/cleanup, device tree updates, and some misc cleanup."
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1023rdb.dts        |  29
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1023si-post.dtsi   | 103
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1024qds.dts        |  29
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1024rdb.dts        |  33
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1042d4rdb.dts      |  52
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240rdb.dts        |   4
-rw-r--r--  arch/powerpc/configs/fsl-emb-nonhw.config     |   6
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgalloc.h  |  44
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h  |  40
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h  |   3
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h            |  19
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h            |  35
-rw-r--r--  arch/powerpc/include/asm/mmu.h                |  23
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgalloc.h  |  44
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h  |  42
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-8xx.h  |   1
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h  |   2
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h     |   4
-rw-r--r--  arch/powerpc/include/asm/pgtable.h            |   2
-rw-r--r--  arch/powerpc/include/asm/reg_8xx.h            |   2
-rw-r--r--  arch/powerpc/kernel/head_8xx.S                | 119
-rw-r--r--  arch/powerpc/mm/Makefile                      |   3
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                 | 216
-rw-r--r--  arch/powerpc/mm/init-common.c                 | 107
-rw-r--r--  arch/powerpc/mm/init_64.c                     |  77
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                  |  37
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c                  |  21
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c         |   6
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig           |   2
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig            |   1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype        |   1
-rw-r--r--  arch/powerpc/sysdev/fsl_lbc.c                 |   2
-rw-r--r--  arch/powerpc/sysdev/fsl_pmc.c                 |   6
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c                 |  48
34 files changed, 803 insertions(+), 360 deletions(-)
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 29757623e5ba..5ba6fbfca274 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -41,6 +41,27 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -72,6 +93,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
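The reserved-memory nodes added above carve out contiguous, naturally aligned regions for the BMan free buffer proxy record (FBPR) and the QMan frame queue descriptor (FQD) and packed frame descriptor record (PFDR) before the page allocator claims that memory; the matching compatible strings are supplied in t1023si-post.dtsi below. A minimal sketch of how a driver can hook such a region at early boot, assuming the usual RESERVEDMEM_OF_DECLARE mechanism (the handler name here is illustrative, not the actual qbman code):

    #include <linux/kernel.h>
    #include <linux/of_reserved_mem.h>

    /* Runs at early boot, while memblock still owns the carve-out. */
    static int __init fbpr_rmem_init(struct reserved_mem *rmem)
    {
            /* rmem->base/size reflect the size/alignment cells above */
            pr_info("bman-fbpr reserved: base %pa size %pa\n",
                    &rmem->base, &rmem->size);
            return 0;
    }
    RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", fbpr_rmem_init);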
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 6e0b4892a740..da2894c59479 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -34,6 +34,21 @@
#include <dt-bindings/thermal/thermal.h>
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_fqd {
+ compatible = "fsl,qman-fqd";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_pfdr {
+ compatible = "fsl,qman-pfdr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -180,6 +195,92 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ cell-index = <0x0>;
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ cell-index = <0x1>;
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ cell-index = <2>;
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ cell-index = <0x3>;
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ cell-index = <0x4>;
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ cell-index = <0x5>;
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+};
+
+&qportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ qportal0: qman-portal@0 {
+ compatible = "fsl,qman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <104 0x2 0 0>;
+ cell-index = <0x0>;
+ };
+ qportal1: qman-portal@4000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <106 0x2 0 0>;
+ cell-index = <0x1>;
+ };
+ qportal2: qman-portal@8000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <108 0x2 0 0>;
+ cell-index = <0x2>;
+ };
+ qportal3: qman-portal@c000 {
+ compatible = "fsl,qman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <110 0x2 0 0>;
+ cell-index = <0x3>;
+ };
+ qportal4: qman-portal@10000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <112 0x2 0 0>;
+ cell-index = <0x4>;
+ };
+ qportal5: qman-portal@14000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <114 0x2 0 0>;
+ cell-index = <0x5>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -413,6 +514,8 @@
};
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-qman3.dtsi"
+/include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3l-0.dtsi"
/include/ "qoriq-fman3-0-10g-0-best-effort.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts
index 772143da367f..d6858b7cd93f 100644
--- a/arch/powerpc/boot/dts/fsl/t1024qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts
@@ -41,6 +41,27 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -80,6 +101,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 302cdd22b4bb..73a645324bc1 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -41,6 +41,31 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ aliases {
+ sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -82,6 +107,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
index 2a5a90dd272e..fcd2aeb5b8ac 100644
--- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
@@ -48,6 +48,58 @@
"fsl,deepsleep-cpld";
};
};
+
+ soc: soc@ffe000000 {
+ fman0: fman@400000 {
+ ethernet@e0000 {
+ phy-handle = <&phy_sgmii_0>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e2000 {
+ phy-handle = <&phy_sgmii_1>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e4000 {
+ phy-handle = <&phy_sgmii_2>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e6000 {
+ phy-handle = <&phy_rgmii_0>;
+ phy-connection-type = "rgmii";
+ };
+
+ ethernet@e8000 {
+ phy-handle = <&phy_rgmii_1>;
+ phy-connection-type = "rgmii";
+ };
+
+ mdio0: mdio@fc000 {
+ phy_sgmii_0: ethernet-phy@02 {
+ reg = <0x02>;
+ };
+
+ phy_sgmii_1: ethernet-phy@03 {
+ reg = <0x03>;
+ };
+
+ phy_sgmii_2: ethernet-phy@01 {
+ reg = <0x01>;
+ };
+
+ phy_rgmii_0: ethernet-phy@04 {
+ reg = <0x04>;
+ };
+
+ phy_rgmii_1: ethernet-phy@05 {
+ reg = <0x05>;
+ };
+ };
+ };
+ };
+
};
#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index cc0a264b8acb..8166c660712a 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -125,6 +125,10 @@
};
i2c@118000 {
+ hwmon@2f {
+ compatible = "winbond,w83793";
+ reg = <0x2f>;
+ };
eeprom@52 {
compatible = "at24,24c256";
reg = <0x52>;
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 1a61e81ab0cd..cc49c95494da 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -44,6 +44,7 @@ CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAME_WARN=1024
CONFIG_FTL=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_HIGH_RES_TIMERS=y
@@ -104,8 +105,13 @@ CONFIG_PACKET=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PERF_EVENTS=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_QNX4FS_FS=m
CONFIG_RCU_TRACE=y
+CONFIG_RESET_CONTROLLER=y
CONFIG_ROOT_NFS=y
CONFIG_SYSV_FS=m
CONFIG_SYSVIPC=y
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 8e21bb492dca..d310546e5d9d 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -2,14 +2,42 @@
#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
#include <linux/threads.h>
+#include <linux/slab.h>
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
extern void __bad_pte(pmd_t *pmd);
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
/*
* We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
- BUG_ON(index_size); /* 32-bit doesn't use this */
- free_page((unsigned long)table);
+ if (!index_size) {
+ free_page((unsigned long)table);
+ } else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
#define check_pgt_cache() do { } while (0)
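The long comment above is the heart of the 32-bit rework: instead of whole pages from get_free_page(), upper-level tables now come from kmem caches selected by an "index size", and that size must also be encodable in the low bits of an aligned table pointer. A standalone illustration of the sizing and masking rules (userspace C, assuming 4-byte pointers as on ppc32):

    #include <stdio.h>

    #define MAX_PGTABLE_INDEX_SIZE 0xf

    int main(void)
    {
            unsigned int index_size = 4;    /* a 2^4 = 16-entry table */
            /* allocation size = 2^index_size * sizeof(pointer) */
            unsigned long size = sizeof(void *) << index_size;

            /* tables are aligned so the low bits can carry index_size */
            printf("table: %lu bytes, low-bit mask %#x\n",
                   size, MAX_PGTABLE_INDEX_SIZE);
            return 0;
    }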
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index dc58980f3ad9..012223638815 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -8,6 +8,23 @@
/* And here we include common definitions */
#include <asm/pte-common.h>
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
@@ -19,14 +36,10 @@
* -Matt
*/
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PTE (1 << PTE_SHIFT)
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
-
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -82,12 +95,8 @@
extern unsigned long ioremap_bot;
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -284,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-#endif
-
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index bbea0040320a..6cfc5dbdae03 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -800,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
static inline int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags)
{
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c03e0a3dd4d8..ede215167d1a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+ return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+#else
return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
+#ifdef CONFIG_PPC_8xx
+ return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+#else
return hpd.pd & HUGEPD_SHIFT_MASK;
+#endif
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
pte_t pte);
+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
* are reserved early in the boot process by memblock instead of via
* the .dts as on IBM platforms.
*/
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
+ defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
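The 8xx variants above pack both the page-table address and the page size into hpd.pd, with the size recoverable by hugepd_shift()'s arithmetic. A quick standalone check, using the _PMD_PAGE_* values visible in the pte-8xx.h hunk below:

    #include <assert.h>

    #define _PMD_PAGE_MASK  0x000c
    #define _PMD_PAGE_8M    0x000c
    #define _PMD_PAGE_512K  0x0004

    static unsigned int hugepd_shift(unsigned long pd)
    {
            return ((pd & _PMD_PAGE_MASK) >> 1) + 17;
    }

    int main(void)
    {
            assert(hugepd_shift(_PMD_PAGE_8M) == 23);    /* 2^23 = 8M */
            assert(hugepd_shift(_PMD_PAGE_512K) == 19);  /* 2^19 = 512k */
            return 0;
    }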
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3e0e4927811c..798b5bf91427 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -172,6 +172,41 @@ typedef struct {
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * penc : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+ unsigned int shift; /* number of bits */
+ unsigned int enc; /* PTE encoding */
+ unsigned int ind; /* Corresponding indirect page size shift */
+ unsigned int flags;
+#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC_4K_PAGES)
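shift_to_mmu_psize() and mmu_psize_to_shift() give the 8xx the same psize lookup the Book3E MMUs already have; they scan the mmu_psize_defs[] table that tlb_nohash.c now populates for the 8xx. A standalone sketch of the lookup against those table values:

    #include <assert.h>

    #define MMU_PAGE_512K  5        /* numbering from the mmu.h change below */
    #define MMU_PAGE_8M    9
    #define MMU_PAGE_COUNT 16

    static unsigned int shifts[MMU_PAGE_COUNT] = {
            [MMU_PAGE_512K] = 19,
            [MMU_PAGE_8M]   = 23,
    };

    static int shift_to_mmu_psize(unsigned int shift)
    {
            int psize;

            for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                    if (shifts[psize] == shift)
                            return psize;
            return -1;
    }

    int main(void)
    {
            assert(shift_to_mmu_psize(23) == MMU_PAGE_8M);
            assert(shift_to_mmu_psize(19) == MMU_PAGE_512K);
            assert(shift_to_mmu_psize(16) == -1);   /* 64k: not an 8xx size */
            return 0;
    }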
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 060b40b1bc3d..09304d2bec03 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -269,19 +269,20 @@ static inline bool early_radix_enabled(void)
#define MMU_PAGE_64K 2
#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K 4
-#define MMU_PAGE_1M 5
-#define MMU_PAGE_2M 6
-#define MMU_PAGE_4M 7
-#define MMU_PAGE_8M 8
-#define MMU_PAGE_16M 9
-#define MMU_PAGE_64M 10
-#define MMU_PAGE_256M 11
-#define MMU_PAGE_1G 12
-#define MMU_PAGE_16G 13
-#define MMU_PAGE_64G 14
+#define MMU_PAGE_512K 5
+#define MMU_PAGE_1M 6
+#define MMU_PAGE_2M 7
+#define MMU_PAGE_4M 8
+#define MMU_PAGE_8M 9
+#define MMU_PAGE_16M 10
+#define MMU_PAGE_64M 11
+#define MMU_PAGE_256M 12
+#define MMU_PAGE_1G 13
+#define MMU_PAGE_16G 14
+#define MMU_PAGE_64G 15
/* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
-#define MMU_PAGE_COUNT 15
+#define MMU_PAGE_COUNT 16
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 76d6b9e0c8a9..633139291a48 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -2,14 +2,42 @@
#define _ASM_POWERPC_PGALLOC_32_H
#include <linux/threads.h>
+#include <linux/slab.h>
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
extern void __bad_pte(pmd_t *pmd);
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
/*
* We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
- BUG_ON(index_size); /* 32-bit doesn't use this */
- free_page((unsigned long)table);
+ if (!index_size) {
+ free_page((unsigned long)table);
+ } else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 65073fbc6707..ba9921bf202e 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush;
#endif /* __ASSEMBLY__ */
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush;
* -Matt
*/
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE (1 << PTE_SHIFT)
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
@@ -329,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-#endif
-
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 3742b1919661..b4df2734c078 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -49,6 +49,7 @@
#define _PMD_BAD 0x0ff0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
+#define _PMD_PAGE_512K 0x0004
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 53a41b06a7b9..c7f927e67d14 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -347,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) __pte((x).val)
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
extern int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags);
extern int __meminit vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1263c22d60d8..172849727054 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
+#ifdef CONFIG_PPC_8xx
+ return ((hpd.pd & 0x4) != 0);
+#else
return (hpd.pd > 0);
+#endif
}
static inline int pmd_huge(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9bd87f269d6d..dd01212935ac 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned long vmalloc_to_phys(void *vmalloc_addr);
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 0197e12f7d48..1f1636124a04 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,7 +4,7 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
-#include <asm/mmu-8xx.h>
+#include <asm/mmu.h>
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fb133a163263..1a9c99d3e5d8 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -73,6 +73,9 @@
#define RPN_PATTERN 0x00f0
#endif
+#define PAGE_SHIFT_512K 19
+#define PAGE_SHIFT_8M 23
+
__HEAD
_ENTRY(_stext);
_ENTRY(_start);
@@ -322,7 +325,7 @@ SystemCall:
#endif
InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mtspr SPRN_SPRG_SCRATCH2, r3
#endif
EXCEPTION_PROLOG_0
@@ -332,10 +335,12 @@ InstructionTLBMiss:
*/
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mfcr r3
+#endif
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
IS_KERNEL(r11, r10)
#endif
mfspr r11, SPRN_M_TW /* Get level 1 table */
@@ -343,7 +348,6 @@ InstructionTLBMiss:
BRANCH_UNLESS_KERNEL(3f)
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
- mtcr r3
#endif
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -351,14 +355,25 @@ InstructionTLBMiss:
/* Extract level 2 index */
rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+ mtcr r11
+ bt- 28, 10f /* bit 28 = Large page (8M) */
+ bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
+#endif
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
-
+4:
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+ mtcr r3
+#endif
/* Insert the APG into the TWC from the Linux PTE. */
rlwimi r11, r10, 0, 25, 26
/* Load the MI_TWC with the attributes for this "segment." */
MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 1, MI_SPS16K
+#endif
#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10
@@ -371,16 +386,45 @@ InstructionTLBMiss:
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */
+#else
rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */
+#endif
MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mfspr r3, SPRN_SPRG_SCRATCH2
#endif
EXCEPTION_EPILOG_0
rfi
+#ifdef CONFIG_HUGETLB_PAGE
+10: /* 8M pages */
+#ifdef CONFIG_PPC_16K_PAGES
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+ /* Level 2 base */
+ rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+
+20: /* 512k pages */
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+#endif
+
. = 0x1200
DataStoreTLBMiss:
mtspr SPRN_SPRG_SCRATCH2, r3
@@ -407,7 +451,6 @@ _ENTRY(DTLBMiss_jmp)
#endif
blt cr7, DTLBMissLinear
3:
- mtcr r3
mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
@@ -418,8 +461,15 @@ _ENTRY(DTLBMiss_jmp)
*/
/* Extract level 2 index */
rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+ mtcr r11
+ bt- 28, 10f /* bit 28 = Large page (8M) */
+ bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
+#endif
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
+4:
+ mtcr r3
/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
* It is bit 26-27 of both the Linux PTE and the TWC (at least
@@ -434,6 +484,11 @@ _ENTRY(DTLBMiss_jmp)
rlwimi r11, r10, 32-5, 30, 30
MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+ /* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
+ * In 16k pages mode, SPS is always 1 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 1, MD_SPS16K
+#endif
/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
* We also need to know if the insn is a load/store, so:
* Clear _PAGE_PRESENT and load that which will
@@ -455,7 +510,11 @@ _ENTRY(DTLBMiss_jmp)
* of the MMU.
*/
li r11, RPN_PATTERN
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
+#else
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
+#endif
rlwimi r10, r11, 0, 20, 20 /* clear 20 */
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
@@ -465,6 +524,30 @@ _ENTRY(DTLBMiss_jmp)
EXCEPTION_EPILOG_0
rfi
+#ifdef CONFIG_HUGETLB_PAGE
+10: /* 8M pages */
+ /* Extract level 2 index */
+#ifdef CONFIG_PPC_16K_PAGES
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+ /* Level 2 base */
+ rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+
+20: /* 512k pages */
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+#endif
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
@@ -586,6 +669,9 @@ _ENTRY(FixupDAR_cmp)
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ mtcr r11
+ bt 28,200f /* bit 28 = Large page (8M) */
+ bt 29,202f /* bit 29 = Large page (8M or 512K) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -611,6 +697,27 @@ _ENTRY(FixupDAR_cmp)
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
+ /* concat physical page address(r11) and page offset(r10) */
+200:
+#ifdef CONFIG_PPC_16K_PAGES
+ rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+ rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+#else
+ rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r11, 0(r11) /* Get the pte */
+ /* concat physical page address(r11) and page offset(r10) */
+ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
+ b 201b
+
+202:
+ rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ lwz r11, 0(r11) /* Get the pte */
+ /* concat physical page address(r11) and page offset(r10) */
+ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
+ b 201b
+
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
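The new 10:/20: paths in both TLB miss handlers key off the level-1 (PMD) entry itself: it is copied into the condition register, where bit 28 flags an 8M page and bit 29 flags a large page of either size, matching the _PMD_PAGE_8M (0x000c) and _PMD_PAGE_512K (0x0004) encodings. A rough, self-contained C paraphrase of the added dispatch (the walk helpers are stand-ins, not real kernel functions):

    typedef unsigned long pte_t;

    static pte_t walk_8m(unsigned long l1)     { return l1; } /* stub */
    static pte_t walk_512k(unsigned long l1)   { return l1; } /* stub */
    static pte_t walk_normal(unsigned long l1) { return l1; } /* stub */

    static pte_t tlb_miss_walk(unsigned long l1_entry)
    {
            if (l1_entry & 0x8)             /* bit 28 set: 8M page */
                    return walk_8m(l1_entry);
            if (l1_entry & 0x4)             /* bit 29 set: 512k page */
                    return walk_512k(l1_entry);
            return walk_normal(l1_entry);   /* ordinary 4k/16k level-2 walk */
    }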
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5784682d7b63..7414034df1c3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,7 +7,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y := fault.o mem.o pgtable.o mmap.o \
- init_$(BITS).o pgtable_$(BITS).o
+ init_$(BITS).o pgtable_$(BITS).o \
+ init-common.o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a5d3ecdabc44..289df38fb7e0 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -26,6 +26,8 @@
#ifdef CONFIG_HUGETLB_PAGE
#define PAGE_SHIFT_64K 16
+#define PAGE_SHIFT_512K 19
+#define PAGE_SHIFT_8M 23
#define PAGE_SHIFT_16M 24
#define PAGE_SHIFT_16G 34
@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT;
* implementations may have more than one gpage size, so we need multiple
* arrays
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define MAX_NUMBER_GPAGES 128
struct psize_gpages {
u64 gpage_list[MAX_NUMBER_GPAGES];
@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
{
struct kmem_cache *cachep;
pte_t *new;
-
-#ifdef CONFIG_PPC_FSL_BOOK3E
int i;
- int num_hugepd = 1 << (pshift - pdshift);
- cachep = hugepte_cache;
-#else
- cachep = PGT_CACHE(pdshift - pshift);
-#endif
+ int num_hugepd;
+
+ if (pshift >= pdshift) {
+ cachep = hugepte_cache;
+ num_hugepd = 1 << (pshift - pdshift);
+ } else {
+ cachep = PGT_CACHE(pdshift - pshift);
+ num_hugepd = 1;
+ }
new = kmem_cache_zalloc(cachep, GFP_KERNEL);
@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
smp_wmb();
spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+
/*
* We have multiple higher-level entries that point to the same
* actual pte location. Fill in each as we go and backtrack on error.
@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp)))
break;
else
+#ifdef CONFIG_PPC_BOOK3S_64
+ hpdp->pd = __pa(new) |
+ (shift_to_mmu_psize(pshift) << 2);
+#elif defined(CONFIG_PPC_8xx)
+ hpdp->pd = __pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) |
+ _PMD_PRESENT;
+#else
/* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+#endif
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
hpdp->pd = 0;
kmem_cache_free(cachep, new);
}
-#else
- if (!hugepd_none(*hpdp))
- kmem_cache_free(cachep, new);
- else {
-#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
-#else
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#endif
- }
-#endif
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
* These macros define how to determine which level of the page table holds
* the hpdp.
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
/*
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
+#ifdef CONFIG_PPC_BOOK3S_64
if (pshift == PGDIR_SHIFT)
/* 16GB huge page */
return (pte_t *) pg;
@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm;
}
}
- if (!hpdp)
- return NULL;
-
- BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
-
- if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
- return NULL;
-
- return hugepte_offset(*hpdp, addr, pdshift);
-}
-
#else
-
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- hugepd_t *hpdp = NULL;
- unsigned pshift = __ffs(sz);
- unsigned pdshift = PGDIR_SHIFT;
-
- addr &= ~(sz-1);
-
- pg = pgd_offset(mm, addr);
-
if (pshift >= HUGEPD_PGD_SHIFT) {
hpdp = (hugepd_t *)pg;
} else {
@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm;
}
}
-
+#endif
if (!hpdp)
return NULL;
@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(*hpdp, addr, pdshift);
}
-#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy allocator is setup.
*/
@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val,
npages = 0;
if (npages > MAX_NUMBER_GPAGES) {
pr_warn("MMU: %lu pages requested for page "
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
"size %llu KB, limiting to "
+#else
+ "size %u KB, limiting to "
+#endif
__stringify(MAX_NUMBER_GPAGES) "\n",
npages, size / 1024);
npages = MAX_NUMBER_GPAGES;
@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
}
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
}
put_cpu_var(hugepd_freelist_cur);
}
+#else
+static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
unsigned long pdmask = ~((1UL << pdshift) - 1);
unsigned int num_hugepd = 1;
+ unsigned int shift = hugepd_shift(*hpdp);
-#ifdef CONFIG_PPC_FSL_BOOK3E
/* Note: On fsl the hpdp may be the first of several */
- num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
-#else
- unsigned int shift = hugepd_shift(*hpdp);
-#endif
+ if (shift > pdshift)
+ num_hugepd = 1 << (shift - pdshift);
start &= pdmask;
if (start < floor)
@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
for (i = 0; i < num_hugepd; i++, hpdp++)
hpdp->pd = 0;
-#ifdef CONFIG_PPC_FSL_BOOK3E
- hugepd_free(tlb, hugepte);
-#else
- pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#endif
+ if (shift >= pdshift)
+ hugepd_free(tlb, hugepte);
+ else
+ pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
start = addr;
do {
+ unsigned long more;
+
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
WARN_ON(!pmd_none_or_clear_bad(pmd));
continue;
}
-#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
addr, next, floor, ceiling);
} while (addr = next, addr != end);
@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+ unsigned long more;
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
addr, next, floor, ceiling);
}
@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+ unsigned long more;
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at the pgd level
* for a single hugepage, but all of them point to the
* same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
addr, next, floor, ceiling);
}
@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size)
/* Check that it is a page size supported by the hardware and
* that it fits within pagetable and slice limits. */
-#ifdef CONFIG_PPC_FSL_BOOK3E
- if ((size < PAGE_SIZE) || !is_power_of_4(size))
+ if (size <= PAGE_SIZE)
return -EINVAL;
-#else
- if (!is_power_of_2(size)
- || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+#if defined(CONFIG_PPC_FSL_BOOK3E)
+ if (!is_power_of_4(size))
+ return -EINVAL;
+#elif !defined(CONFIG_PPC_8xx)
+ if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
return -EINVAL;
#endif
@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str)
}
__setup("hugepagesz=", hugepage_setup_sz);
-#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
int psize;
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- unsigned shift;
-
- if (!mmu_psize_defs[psize].shift)
- continue;
-
- shift = mmu_psize_to_shift(psize);
-
- /* Don't treat normal page sizes as huge... */
- if (shift != PAGE_SHIFT)
- if (add_huge_page_size(1ULL << shift) < 0)
- continue;
- }
-
- /*
- * Create a kmem cache for hugeptes. The bottom bits in the pte have
- * size information encoded in them, so align them to allow this
- */
- hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
- HUGEPD_SHIFT_MASK + 1, 0, NULL);
- if (hugepte_cache == NULL)
- panic("%s: Unable to create kmem cache for hugeptes\n",
- __func__);
-
- /* Default hpage size = 4M */
- if (mmu_psize_defs[MMU_PAGE_4M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
- else
- panic("%s: Unable to set default huge page size\n", __func__);
-
-
- return 0;
-}
-#else
-static int __init hugetlbpage_init(void)
-{
- int psize;
-
+#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
return -ENODEV;
-
+#endif
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
unsigned shift;
unsigned pdshift;
@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void)
if (add_huge_page_size(1ULL << shift) < 0)
continue;
- if (shift < PMD_SHIFT)
+ if (shift < HUGEPD_PUD_SHIFT)
pdshift = PMD_SHIFT;
- else if (shift < PUD_SHIFT)
+ else if (shift < HUGEPD_PGD_SHIFT)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void)
* if we have pdshift and shift value same, we don't
* use pgt cache for hugepd.
*/
- if (pdshift != shift) {
+ if (pdshift > shift) {
pgtable_cache_add(pdshift - shift, NULL);
if (!PGT_CACHE(pdshift - shift))
panic("hugetlbpage_init(): could not create "
"pgtable cache for %d bit pagesize\n", shift);
}
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+ else if (!hugepte_cache) {
+ /*
+ * Create a kmem cache for hugeptes. The bottom bits in
+ * the pte have size information encoded in them, so
+ * align them to allow this
+ */
+ hugepte_cache = kmem_cache_create("hugepte-cache",
+ sizeof(pte_t),
+ HUGEPD_SHIFT_MASK + 1,
+ 0, NULL);
+ if (hugepte_cache == NULL)
+ panic("%s: Unable to create kmem cache "
+ "for hugeptes\n", __func__);
+
+ }
+#endif
}
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+ /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
+ if (mmu_psize_defs[MMU_PAGE_4M].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
+ else if (mmu_psize_defs[MMU_PAGE_512K].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
+#else
/* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available
*/
@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
-
+#endif
+ else
+ panic("%s: Unable to set default huge page size\n", __func__);
return 0;
}
-#endif
+
arch_initcall(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
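The rewritten hugetlbpage.c replaces the FSL-vs-IBM #ifdef forks with a single size comparison: when the huge page is at least as big as the range one directory entry maps (pshift >= pdshift), several consecutive hugepd entries must point at the same hugepte from hugepte_cache; otherwise a single entry points at a table from PGT_CACHE(pdshift - pshift). An illustrative calculation (shift values chosen to match an 8xx/4k-page layout, where PGDIR_SHIFT is 22):

    static unsigned int num_hugepd(unsigned int pshift, unsigned int pdshift)
    {
            return pshift >= pdshift ? 1u << (pshift - pdshift) : 1;
    }
    /* e.g. pshift 23 (8M) with pdshift 22 -> 2 consecutive PGD entries,
     * all pointing at one hugepte; pshift 19 (512k) with pdshift 22
     * -> a single entry drawn from PGT_CACHE(3). */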
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
new file mode 100644
index 000000000000..a175cd82ae8c
--- /dev/null
+++ b/arch/powerpc/mm/init-common.c
@@ -0,0 +1,107 @@
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+static void pgd_ctor(void *addr)
+{
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static void pud_ctor(void *addr)
+{
+ memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
+}
+
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+
+/*
+ * Create a kmem_cache() for pagetables. This is not used for PTE
+ * pages - they're linked to struct page, come from the normal free
+ * pages pool and have a different entry size (see real_pte_t) to
+ * everything else. Caches created by this function are used for all
+ * the higher level pagetables, and for hugepage pagetables.
+ */
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
+{
+ char *name;
+ unsigned long table_size = sizeof(void *) << shift;
+ unsigned long align = table_size;
+
+ /* When batching pgtable pointers for RCU freeing, we store
+ * the index size in the low bits. Table alignment must be
+ * big enough to fit it.
+ *
+ * Likewise, hugepage pagetable pointers contain a (different)
+ * shift value in the low bits. All tables must be aligned so
+ * as to leave enough 0 bits in the address to contain it. */
+ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ HUGEPD_SHIFT_MASK + 1);
+ struct kmem_cache *new;
+
+ /* It would be nice if this was a BUILD_BUG_ON(), but at the
+ * moment, gcc doesn't seem to recognize is_power_of_2 as a
+ * constant expression, so so much for that. */
+ BUG_ON(!is_power_of_2(minalign));
+ BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+
+ if (PGT_CACHE(shift))
+ return; /* Already have a cache of this size */
+
+ align = max_t(unsigned long, align, minalign);
+ name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+ new = kmem_cache_create(name, table_size, align, 0, ctor);
+ kfree(name);
+ pgtable_cache[shift - 1] = new;
+ pr_debug("Allocated pgtable cache for order %d\n", shift);
+}
+
+
+void pgtable_cache_init(void)
+{
+ pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+
+ if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+ /*
+ * In all current configs, when the PUD index exists it's the
+ * same size as either the pgd or pmd index except with THP enabled
+ * on book3s 64
+ */
+ if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+ pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+
+ if (!PGT_CACHE(PGD_INDEX_SIZE))
+ panic("Couldn't allocate pgd cache");
+ if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ panic("Couldn't allocate pmd pgtable caches");
+ if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+ panic("Couldn't allocate pud pgtable caches");
+}
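The new init-common.c is a straight consolidation of the pgtable-cache code that init_64.c carried (and that pgtable_32.c duplicated in simpler form). For a given shift, pgtable_cache_add() creates a cache named "pgtable-2^<shift>" whose objects are sizeof(void *) << shift bytes, aligned so the low pointer bits can encode either an index size or a hugepd shift. A standalone recomputation of those numbers, with the pointer size and HUGEPD_SHIFT_MASK value assumed (8 bytes and 0x3f, as on ppc64):

    #include <stdio.h>

    int main(void)
    {
            unsigned int shift = 7;                 /* a 128-entry table */
            unsigned long table_size = 8UL << shift;        /* 1024 bytes */
            unsigned long minalign = 0x3f + 1;      /* HUGEPD_SHIFT_MASK + 1 */
            unsigned long align =
                    table_size > minalign ? table_size : minalign;

            printf("pgtable-2^%u: size %lu, align %lu\n",
                   shift, table_size, align);
            return 0;
    }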
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 16ada1eb7e26..a000c3585390 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
-static void pgd_ctor(void *addr)
-{
- memset(addr, 0, PGD_TABLE_SIZE);
-}
-
-static void pud_ctor(void *addr)
-{
- memset(addr, 0, PUD_TABLE_SIZE);
-}
-
-static void pmd_ctor(void *addr)
-{
- memset(addr, 0, PMD_TABLE_SIZE);
-}
-
-struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
-
-/*
- * Create a kmem_cache() for pagetables. This is not used for PTE
- * pages - they're linked to struct page, come from the normal free
- * pages pool and have a different entry size (see real_pte_t) to
- * everything else. Caches created by this function are used for all
- * the higher level pagetables, and for hugepage pagetables.
- */
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
-{
- char *name;
- unsigned long table_size = sizeof(void *) << shift;
- unsigned long align = table_size;
-
- /* When batching pgtable pointers for RCU freeing, we store
- * the index size in the low bits. Table alignment must be
- * big enough to fit it.
- *
- * Likewise, hugeapge pagetable pointers contain a (different)
- * shift value in the low bits. All tables must be aligned so
- * as to leave enough 0 bits in the address to contain it. */
- unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
- HUGEPD_SHIFT_MASK + 1);
- struct kmem_cache *new;
-
- /* It would be nice if this was a BUILD_BUG_ON(), but at the
- * moment, gcc doesn't seem to recognize is_power_of_2 as a
- * constant expression, so so much for that. */
- BUG_ON(!is_power_of_2(minalign));
- BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
-
- if (PGT_CACHE(shift))
- return; /* Already have a cache of this size */
-
- align = max_t(unsigned long, align, minalign);
- name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor);
- kfree(name);
- pgtable_cache[shift - 1] = new;
- pr_debug("Allocated pgtable cache for order %d\n", shift);
-}
-
-
-void pgtable_cache_init(void)
-{
- pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
- pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
- /*
- * In all current configs, when the PUD index exists it's the
- * same size as either the pgd or pmd index except with THP enabled
- * on book3s 64
- */
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
- if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
- panic("Couldn't allocate pgtable caches");
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- panic("Couldn't allocate pud pgtable caches");
-}
-
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Given an address within the vmemmap, determine the pfn of the page that
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 0ae0572bc239..a65c0b4c0669 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[];
-#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
-
-#ifndef CONFIG_PPC_4K_PAGES
-static struct kmem_cache *pgtable_cache;
-
-void pgtable_cache_init(void)
-{
- pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
- 1 << PGDIR_ORDER, 0, NULL);
- if (pgtable_cache == NULL)
- panic("Couldn't allocate pgtable caches");
-}
-#endif
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *ret;
-
- /* pgdir take page or two with 4K pages and a page fraction otherwise */
-#ifndef CONFIG_PPC_4K_PAGES
- ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
-#else
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- PGDIR_ORDER - PAGE_SHIFT);
-#endif
- return ret;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifndef CONFIG_PPC_4K_PAGES
- kmem_cache_free(pgtable_cache, (void *)pgd);
-#else
- free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
-#endif
-}
-
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 050badc0ebd3..ba28fcb98597 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -53,7 +53,7 @@
* other sizes not listed here. The .ind field is only used on MMUs that have
* indirect page table entries.
*/
-#ifdef CONFIG_PPC_BOOK3E_MMU
+#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB,
},
};
+#elif defined(CONFIG_PPC_8xx)
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+ /* we only manage 4k and 16k pages as normal pages */
+#ifdef CONFIG_PPC_4K_PAGES
+ [MMU_PAGE_4K] = {
+ .shift = 12,
+ },
+#else
+ [MMU_PAGE_16K] = {
+ .shift = 14,
+ },
+#endif
+ [MMU_PAGE_512K] = {
+ .shift = 19,
+ },
+ [MMU_PAGE_8M] = {
+ .shift = 23,
+ },
+};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 24717d060008..08f92f6ed228 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = {
.remove = pmc_remove
};
-static int pmc_init(void)
-{
- return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
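builtin_platform_driver() generates essentially the boilerplate this hunk deletes; per include/linux/platform_device.h it expands to roughly (naming details elided):

    static int __init pmc_driver_init(void)
    {
            return platform_driver_register(&pmc_driver);
    }
    device_initcall(pmc_driver_init);

so behavior is unchanged, minus an open-coded initcall.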
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 9dc1d28975b9..47b389dc4938 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -253,6 +253,8 @@ endif # PPC32
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE
+ select E500
+ select PPC_E500MC if PPC64
help
This option enables support for running as a QEMU guest using
QEMU's generic e500 machine. This is not required if you're
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 564d99bb2a26..80cbcb0ad9b1 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -130,6 +130,7 @@ config 8xx_CPU6
config 8xx_CPU15
bool "CPU15 Silicon Errata"
+ depends on !HUGETLB_PAGE
default y
help
This enables a workaround for erratum CPU15 on MPC8xx chips.
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index ca2da30ad2ab..6e89e5a8d4fb 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -34,6 +34,7 @@ config PPC_8xx
select FSL_SOC
select 8xx
select PPC_LIB_RHEAP
+ select SYS_SUPPORTS_HUGETLBFS
config 40x
bool "AMCC 40x"
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 424b67fdb57f..5340a483cf55 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -31,7 +31,7 @@
#include <asm/prom.h>
#include <asm/fsl_lbc.h>
-static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
+static DEFINE_SPINLOCK(fsl_lbc_lock);
struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
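A pure style cleanup: the two spellings are equivalent, since spinlock_types.h defines

    #define DEFINE_SPINLOCK(x)      spinlock_t x = __SPIN_LOCK_UNLOCKED(x)

and DEFINE_SPINLOCK is the canonical form for statically initialized locks.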
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 1d6fd7c59fe9..232225e7f863 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = {
.probe = pmc_probe,
};
-static int __init pmc_init(void)
-{
- return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index d93056eedcb0..19101f9cfcfc 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void)
EXPORT_SYMBOL(get_immrbase);
-static u32 sysfreq = -1;
-
u32 fsl_get_sys_freq(void)
{
+ static u32 sysfreq = -1;
struct device_node *soc;
- const u32 *prop;
- int size;
if (sysfreq != -1)
return sysfreq;
@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void)
if (!soc)
return -1;
- prop = of_get_property(soc, "clock-frequency", &size);
- if (!prop || size != sizeof(*prop) || *prop == 0)
- prop = of_get_property(soc, "bus-frequency", &size);
-
- if (prop && size == sizeof(*prop))
- sysfreq = *prop;
+ of_property_read_u32(soc, "clock-frequency", &sysfreq);
+ if (sysfreq == -1 || !sysfreq)
+ of_property_read_u32(soc, "bus-frequency", &sysfreq);
of_node_put(soc);
return sysfreq;
@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
-static u32 brgfreq = -1;
-
u32 get_brgfreq(void)
{
+ static u32 brgfreq = -1;
struct device_node *node;
- const unsigned int *prop;
- int size;
if (brgfreq != -1)
return brgfreq;
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
if (node) {
- prop = of_get_property(node, "clock-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop;
-
+ of_property_read_u32(node, "clock-frequency", &brgfreq);
of_node_put(node);
return brgfreq;
}
@@ -135,15 +123,11 @@ u32 get_brgfreq(void)
node = of_find_node_by_type(NULL, "qe");
if (node) {
- prop = of_get_property(node, "brg-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop;
-
- if (brgfreq == -1 || brgfreq == 0) {
- prop = of_get_property(node, "bus-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop / 2;
- }
+ of_property_read_u32(node, "brg-frequency", &brgfreq);
+ if (brgfreq == -1 || !brgfreq)
+ if (!of_property_read_u32(node, "bus-frequency",
+ &brgfreq))
+ brgfreq /= 2;
of_node_put(node);
}
@@ -152,10 +136,9 @@ u32 get_brgfreq(void)
EXPORT_SYMBOL(get_brgfreq);
-static u32 fs_baudrate = -1;
-
u32 get_baudrate(void)
{
+ static u32 fs_baudrate = -1;
struct device_node *node;
if (fs_baudrate != -1)
@@ -163,12 +146,7 @@ u32 get_baudrate(void)
node = of_find_node_by_type(NULL, "serial");
if (node) {
- int size;
- const unsigned int *prop = of_get_property(node,
- "current-speed", &size);
-
- if (prop)
- fs_baudrate = *prop;
+ of_property_read_u32(node, "current-speed", &fs_baudrate);
of_node_put(node);
}