Diffstat (limited to 'arch/mips/include/asm/pgtable-32.h')
 -rw-r--r--  arch/mips/include/asm/pgtable-32.h  148
 1 file changed, 93 insertions, 55 deletions
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 1945c8970141..0e196650f4f4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -62,9 +62,9 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2 - 1)
#else
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
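With PTE_ORDER gone (last-level page tables on 32-bit MIPS always occupy exactly one page), PGDIR_SHIFT depends only on PAGE_SHIFT and PTE_T_LOG2. A quick sketch of how the new expression evaluates under an assumed configuration of 4 KiB pages and a 4-byte pte_t without huge-TLB support; the values are for illustration only and are not taken from the patch:

#include <stdio.h>

/* Assumed example configuration: 4 KiB pages, 32-bit pte_t. */
#define PAGE_SHIFT  12
#define PTE_T_LOG2  2   /* log2(sizeof(pte_t)) for a 4-byte pte_t */

/* New definition: each PGD slot covers one page worth of PTEs. */
#define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2)
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)

int main(void)
{
        /* 2*12 - 2 = 22, so one PGD entry maps 4 MiB of virtual space. */
        printf("PGDIR_SHIFT = %d\n", PGDIR_SHIFT);
        printf("PGDIR_SIZE  = %lu MiB\n", PGDIR_SIZE >> 20);
        return 0;
}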
@@ -75,25 +75,23 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
* we don't really have any PUD/PMD directory physically.
*/
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif
-#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER aieeee_attempt_to_allocate_pmd
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ? __PGD_TABLE_ORDER : 0)
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd
#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2)
#else
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
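Continuing the same assumed configuration (4 KiB pages, 4-byte pgd_t and pte_t, no MIPS_HUGE_TLB_SUPPORT), the renamed PGD_TABLE_ORDER and the PTE_ORDER-free PTRS_PER_PTE work out as in the sketch below. The numbers are illustrative only; the real values follow from the kernel configuration:

#include <stdio.h>

/* Assumed example configuration (not from the patch): 4 KiB pages,
 * 4-byte pgd_t and pte_t, no MIPS_HUGE_TLB_SUPPORT. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PGD_T_LOG2      2
#define PTE_T_LOG2      2

#define PGDIR_SHIFT     (2 * PAGE_SHIFT - PTE_T_LOG2)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)

#define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ? __PGD_TABLE_ORDER : 0)

#define PTRS_PER_PTE    (PAGE_SIZE / 4)         /* sizeof(pte_t) == 4 here */
#define USER_PTRS_PER_PGD (0x80000000UL / PGDIR_SIZE)
#define PTRS_PER_PGD    (USER_PTRS_PER_PGD * 2)

int main(void)
{
        /* 1024 PTEs per table, 512 user PGD slots, 1024 PGD slots in all:
         * the whole PGD fits in a single page (order 0). */
        printf("PTRS_PER_PTE      = %lu\n", PTRS_PER_PTE);
        printf("USER_PTRS_PER_PGD = %lu\n", USER_PTRS_PER_PGD);
        printf("PTRS_PER_PGD      = %lu\n", PTRS_PER_PGD);
        printf("PGD_TABLE_ORDER   = %d\n", PGD_TABLE_ORDER);
        return 0;
}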
@@ -154,7 +152,8 @@ static inline void pmd_clear(pmd_t *pmdp)
#if defined(CONFIG_XPA)
-#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define pte_pfn(x) (((unsigned long)((x).pte_high >> PFN_PTE_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
@@ -162,13 +161,14 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
(pgprot_val(prot) & ~_PFNX_MASK);
- pte.pte_high = (pfn << _PFN_SHIFT) |
+ pte.pte_high = (pfn << PFN_PTE_SHIFT) |
(pgprot_val(prot) & ~_PFN_MASK);
return pte;
}
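Under XPA the PTE is split into two 32-bit words and the physical frame number is spread across both of them, which is why MAX_POSSIBLE_PHYSMEM_BITS rises to 40 here. The real bit assignment comes from _PFN_MASK and _PFNX_MASK in pgtable-bits.h; the sketch below only illustrates the split-word round trip with made-up shift values, not the actual MIPS layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of a split PTE: real shifts/masks come from pgtable-bits.h. */
struct toy_pte { uint32_t pte_low, pte_high; };

#define TOY_PFN_PTE_SHIFT       6   /* hypothetical, for illustration only */
#define TOY_PFNX_SHIFT          (32 - TOY_PFN_PTE_SHIFT)

/* The low PFN bits go into one word above the protection bits; the bits
 * that no longer fit spill into the other word. */
static struct toy_pte toy_pfn_pte(uint64_t pfn)
{
        struct toy_pte pte;

        pte.pte_high = (uint32_t)(pfn << TOY_PFN_PTE_SHIFT);
        pte.pte_low  = (uint32_t)(pfn >> TOY_PFNX_SHIFT);
        return pte;
}

static uint64_t toy_pte_pfn(struct toy_pte pte)
{
        return ((uint64_t)pte.pte_high >> TOY_PFN_PTE_SHIFT) |
               ((uint64_t)pte.pte_low << TOY_PFNX_SHIFT);
}

int main(void)
{
        uint64_t pfn = 0xabcdef1ULL;    /* a 28-bit PFN: ~40-bit physical address with 4 KiB pages */

        assert(toy_pte_pfn(toy_pfn_pte(pfn)) == pfn);
        printf("round trip ok: %#llx\n", (unsigned long long)pfn);
        return 0;
}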
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -183,83 +183,121 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#else
-#ifdef CONFIG_CPU_VR41XX
-#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
-#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
-#else
-#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
-#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#endif
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define pte_pfn(x) ((unsigned long)((x).pte >> PFN_PTE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#define pte_page(x) pfn_to_page(pte_pfn(x))
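The remaining 32-bit case keeps the whole PTE in a single word (the former CONFIG_CPU_VR41XX special case is dropped), so pfn_pte() and pte_pfn() are plain shifts around PFN_PTE_SHIFT. A minimal toy with a hypothetical shift value, just to show the round trip and that the low protection bits are preserved:

#include <assert.h>
#include <stdint.h>

/* Toy single-word PTE: PFN above a hypothetical shift, protection bits below. */
#define TOY_PFN_PTE_SHIFT       12  /* hypothetical; the real value comes from pgtable-bits.h */
#define TOY_PROT_MASK           ((1u << TOY_PFN_PTE_SHIFT) - 1)

static uint32_t toy_pfn_pte(uint32_t pfn, uint32_t prot)
{
        return (pfn << TOY_PFN_PTE_SHIFT) | (prot & TOY_PROT_MASK);
}

static uint32_t toy_pte_pfn(uint32_t pte)
{
        return pte >> TOY_PFN_PTE_SHIFT;
}

int main(void)
{
        uint32_t pte = toy_pfn_pte(0x12345, 0x7);

        /* Both the PFN and the low protection bits can be recovered. */
        assert(toy_pte_pfn(pte) == 0x12345);
        assert((pte & TOY_PROT_MASK) == 0x7);
        return 0;
}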
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ */
#if defined(CONFIG_CPU_R3K_TLB)
-/* Swap entries must have VALID bit cleared. */
+/*
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <----------- offset ------------> < type -> V G E 0 0 0 0 0 0 P
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * _PAGE_PRESENT (P), _PAGE_VALID (V) and _PAGE_GLOBAL (G) have to remain
+ * unused.
+ */
#define __swp_type(x) (((x).val >> 10) & 0x1f)
#define __swp_offset(x) ((x).val >> 15)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & 0x1f) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1 << 7)
+
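A userspace restatement of the R3K encoding documented above; it only sanity-checks that the type, offset and the borrowed exclusive bit occupy disjoint bit ranges (the shift and bit values are copied from this file, everything else is illustrative):

#include <assert.h>

/* Toy re-statement of the R3K swap encoding, for illustration. */
#define __swp_entry(type, offset)  ((((type) & 0x1f) << 10) | ((offset) << 15))
#define __swp_type(val)            (((val) >> 10) & 0x1f)
#define __swp_offset(val)          ((val) >> 15)
#define _PAGE_SWP_EXCLUSIVE        (1 << 7)

int main(void)
{
        unsigned long val = __swp_entry(3, 0x1234);

        /* type and offset round-trip ... */
        assert(__swp_type(val) == 3);
        assert(__swp_offset(val) == 0x1234);
        /* ... and the encoding leaves the borrowed exclusive bit clear,
         * so it can be set and cleared independently of the swap entry. */
        assert(!(val & _PAGE_SWP_EXCLUSIVE));
        return 0;
}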
#else
#if defined(CONFIG_XPA)
-/* Swap entries must have VALID and GLOBAL bits cleared. */
+/*
+ * Format of swap PTEs:
+ *
+ * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * 0 0 0 0 0 0 E P <------------------ zeroes ------------------->
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <----------------- offset ------------------> < type -> V G 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * _PAGE_PRESENT (P), _PAGE_VALID (V) and _PAGE_GLOBAL (G) have to remain
+ * unused.
+ */
#define __swp_type(x) (((x).val >> 4) & 0x1f)
#define __swp_offset(x) ((x).val >> 9)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & 0x1f) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+/*
+ * We borrow bit 57 (bit 25 in the low PTE) to store the exclusive marker in
+ * swap PTEs.
+ */
+#define _PAGE_SWP_EXCLUSIVE (1 << 25)
+
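Because __pte_to_swp_entry() here takes only (pte).pte_high while the exclusive marker is bit 25 of the low word (bit 57 of the combined PTE), flipping the marker can never corrupt the stored type and offset. A toy restatement of that property, with the layout and names simplified rather than taken from the real pte_t:

#include <assert.h>
#include <stdint.h>

/* Toy two-word PTE mirroring the split described above: the swap payload
 * lives in one 32-bit half, the exclusive marker in the other. */
struct toy_pte { uint32_t lo, hi; };

#define TOY_SWP_EXCLUSIVE       (1u << 25)

static struct toy_pte swp_entry_to_pte(uint32_t swp_val)
{
        struct toy_pte pte = { .lo = 0, .hi = swp_val };        /* cf. __swp_entry_to_pte() */
        return pte;
}

int main(void)
{
        struct toy_pte pte = swp_entry_to_pte((3u << 4) | (0x1234u << 9));

        /* Setting the exclusive marker touches only the other half ... */
        pte.lo |= TOY_SWP_EXCLUSIVE;
        /* ... so the swap type/offset payload is unchanged. */
        assert(((pte.hi >> 4) & 0x1f) == 3);
        assert((pte.hi >> 9) == 0x1234);
        return 0;
}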
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-/* Swap entries must have VALID and GLOBAL bits cleared. */
+/*
+ * Format of swap PTEs:
+ *
+ * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * <------------------ zeroes -------------------> E P 0 0 0 0 0 0
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------------- offset --------------------> < type -> V G
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * _PAGE_PRESENT (P), _PAGE_VALID (V) and _PAGE_GLOBAL (G) have to remain
+ * unused.
+ */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & 0x1f) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+/*
+ * We borrow bit 39 (bit 7 in the low PTE) to store the exclusive marker in swap
+ * PTEs.
+ */
+#define _PAGE_SWP_EXCLUSIVE (1 << 7)
+
#else
/*
- * Constraints:
- * _PAGE_PRESENT at bit 0
- * _PAGE_MODIFIED at bit 4
- * _PAGE_GLOBAL at bit 6
- * _PAGE_VALID at bit 7
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------- offset --------------> < type -> 0 0 0 0 0 0 E P
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * _PAGE_PRESENT (P), _PAGE_VALID (V) and _PAGE_GLOBAL (G) have to remain
+ * unused. The location of V and G varies.
*/
#define __swp_type(x) (((x).val >> 8) & 0x1f)
#define __swp_offset(x) ((x).val >> 13)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+/* We borrow bit 1 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1 << 1)
+
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#endif /* defined(CONFIG_CPU_R3K_TLB) */
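_PAGE_SWP_EXCLUSIVE on its own is only a bit definition; the companion change in arch/mips/include/asm/pgtable.h (not part of this file) exposes it through the pte_swp_exclusive()/pte_swp_mkexclusive()/pte_swp_clear_exclusive() hooks consumed by the core mm. A rough sketch of what such helpers typically look like, assuming the single-word PTE layouts where pte_val() is the whole entry (the two-word XPA and 36-bit layouts would operate on pte_low instead):

/* Sketch (not part of this diff): typical pte_swp_*exclusive() helpers,
 * as they would appear in arch/<arch>/include/asm/pgtable.h, built on the
 * _PAGE_SWP_EXCLUSIVE bit defined above. */
static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
        return pte;
}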