Diffstat (limited to 'arch/sh/include/asm/pgtable_32.h')
-rw-r--r--  arch/sh/include/asm/pgtable_32.h  105
1 file changed, 55 insertions, 50 deletions
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 29274f0e428e..71b18741cc11 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -170,7 +170,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_SPECIAL)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
@@ -287,9 +287,9 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
__pgprot(0)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Certain architectures need to do special things when PTEs
@@ -307,14 +307,13 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#endif
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
/*
* (pmds are folded into pgds so this doesn't actually get called,
* but the define is needed for a generic inline function.)
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define PFN_PTE_SHIFT PAGE_SHIFT
#define pfn_pte(pfn, prot) \
__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) \
@@ -323,7 +322,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define pte_none(x) (!pte_val(x))
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
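The new PFN_PTE_SHIFT define above is what pays for deleting set_pte_at(): generic mm code (set_ptes() in include/linux/pgtable.h) advances a PTE from one page to the next by adding 1 << PFN_PTE_SHIFT, so the architecture no longer needs a private wrapper. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT of 12) and modelling the PTE as a bare 64-bit value:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT    12           /* assumption: 4 KiB pages */
#define PFN_PTE_SHIFT PAGE_SHIFT   /* mirrors the define above */

int main(void)
{
	uint64_t prot = 0x1ff;  /* stand-in protection bits */
	/* pfn_pte(0x1234, prot), as defined above */
	uint64_t pte = ((uint64_t)0x1234 << PAGE_SHIFT) | prot;

	/* how generic set_ptes() steps to the PTE for the next page */
	pte += (uint64_t)1 << PFN_PTE_SHIFT;

	assert(pte >> PAGE_SHIFT == 0x1235);  /* PFN advanced by one */
	assert((pte & prot) == prot);         /* protection bits untouched */
	return 0;
}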
@@ -359,11 +358,11 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
-PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC(high, mkwrite_novma, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
-PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC(low, mkwrite_novma, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif
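Written out, the PTE_BIT_FUNC() token-pasting macro from the hunk context above ("static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }") turns each of these one-liners into an inline helper; the two non-X2TLB lines expand to:

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low &= ~_PAGE_RW;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte.pte_low |= _PAGE_RW;
	return pte;
}

The rename tracks the kernel-wide introduction of a VMA-aware pte_mkwrite(vma, pte); the _novma variant keeps the old behaviour, and as the expansion shows, the bits it sets are unchanged.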
@@ -381,14 +380,6 @@ PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
#define pgprot_noncached pgprot_writecombine
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= _PAGE_CHG_MASK;
@@ -401,28 +392,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
-#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-#define __pgd_offset(address) pgd_index(address)
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define __pte_offset(address) pte_index(address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pmd_val(pmd);
+}
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_unmap(pte) do { } while (0)
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
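A pmd on sh stores the kernel virtual address of its page table, which is why pmd_page_vaddr() can return pmd_val() directly while pmd_pfn() and pmd_page() first translate to a physical address. A sketch of the invariant tying the three helpers together, assuming the usual virt_to_page(v) == pfn_to_page(__pa(v) >> PAGE_SHIFT) identity:

/* hypothetical self-check; pmd_helpers_agree() is not part of the header */
static inline void pmd_helpers_agree(pmd_t pmd)
{
	BUG_ON(page_to_pfn(pmd_page(pmd)) != pmd_pfn(pmd));
	BUG_ON((__pa(pmd_page_vaddr(pmd)) >> PAGE_SHIFT) != pmd_pfn(pmd));
}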
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
@@ -438,40 +414,69 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#endif
/*
- * Encode and de-code a swap entry
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
*
* Constraints:
* _PAGE_PRESENT at bit 8
* _PAGE_PROTNONE at bit 9
*
- * For the normal case, we encode the swap type into bits 0:7 and the
- * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
- * preserved bits in the low 32-bits and use the upper 32 as the swap
- * offset (along with a 5-bit type), following the same approach as x86
- * PAE. This keeps the logic quite simple.
+ * For the normal case, we encode the swap type and offset into the swap PTE
+ * such that bits 8 and 9 stay zero. For the 64-bit PTE case, we use the
+ * upper 32 for the swap offset and swap type, following the same approach as
+ * x86 PAE. This keeps the logic quite simple.
*
* As is evident from the Alpha code, if we ever get a 64-bit unsigned
* long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
* much cleaner. (A worked round trip of the 32-bit encoding follows the
* macros below.)
- *
- * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
- * and _PAGE_PROTNONE bits
*/
+
#ifdef CONFIG_X2TLB
+/*
+ * Format of swap PTEs:
+ *
+ * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * <--------------------- offset ----------------------> < type ->
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------------- zeroes --------------------> E 0 0 0 0 0 0
+ */
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
-#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
+#define __swp_entry(type, offset) ((swp_entry_t){ ((type) & 0x1f) | (offset) << 5})
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
#else
-#define __swp_type(x) ((x).val & 0xff)
+/*
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <--------------- offset ----------------> 0 0 0 0 E < type -> 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ */
+#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
+#define __swp_entry(type, offset) ((swp_entry_t){((type) & 0x1f) | (offset) << 10})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
#endif
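In the X2TLB case the constraints are met trivially: __swp_entry_to_pte() places the whole value in pte_high and leaves pte_low, where _PAGE_PRESENT and _PAGE_PROTNONE live, at zero. The 32-bit case instead relies on the shift by one plus the 5-bit type mask to keep bits 8 and 9 clear. A small userspace model of that variant, with bit positions taken from the constraints comment above and the PTE reduced to a plain unsigned long:

#include <assert.h>

#define _PAGE_PRESENT  (1UL << 8)   /* bit 8, per the constraints above */
#define _PAGE_PROTNONE (1UL << 9)   /* bit 9 */

/* the non-X2TLB macros, restated for a bare unsigned long */
#define swp_entry(type, offset)  (((type) & 0x1fUL) | ((offset) << 10))
#define swp_type(val)            ((val) & 0x1fUL)
#define swp_offset(val)          ((val) >> 10)
#define swp_entry_to_pte(val)    ((val) << 1)
#define pte_to_swp_entry(pte)    ((pte) >> 1)

int main(void)
{
	unsigned long pte = swp_entry_to_pte(swp_entry(0x1fUL, 0xabcdeUL));

	/* bits 8 and 9 stay zero, so the swap PTE is never pte_present() */
	assert(!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE)));

	/* type and offset survive the round trip */
	assert(swp_type(pte_to_swp_entry(pte)) == 0x1fUL);
	assert(swp_offset(pte_to_swp_entry(pte)) == 0xabcdeUL);
	return 0;
}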
-#endif /* __ASSEMBLY__ */
+/* In both cases, we borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_USER
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
+}
+
+PTE_BIT_FUNC(low, swp_mkexclusive, |= _PAGE_SWP_EXCLUSIVE);
+PTE_BIT_FUNC(low, swp_clear_exclusive, &= ~_PAGE_SWP_EXCLUSIVE);
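Usage-wise, the marker is a property of the swap PTE rather than of the swap entry: in both layouts the E bit falls outside the type and offset fields, so __swp_type() and __swp_offset() never observe it, and generic code can toggle it without disturbing the entry. The two helpers reduce to plain bit operations on pte_low; restated outside the PTE_BIT_FUNC() macro:

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}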
+
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_SH_PGTABLE_32_H */