Diffstat (limited to 'arch/alpha/include/asm/pgtable.h')
-rw-r--r--  arch/alpha/include/asm/pgtable.h  48
1 file changed, 12 insertions(+), 36 deletions(-)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index ba43cb841d19..90e7a9539102 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -26,7 +26,6 @@ struct vm_area_struct;
* hook is made available.
*/
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
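
With set_pte_at() gone, alpha relies on the generic page table range API: the architecture only supplies set_pte(), and include/linux/pgtable.h builds set_ptes() — plus a set_pte_at() compatibility wrapper — on top of it. A simplified sketch of that generic shape (the lazy-MMU and page-table-check hooks are omitted; PFN_PTE_SHIFT is the constant this patch defines further down):

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte, unsigned int nr)
{
        for (;;) {
                set_pte(ptep, pte);     /* arch hook, defined above */
                if (--nr == 0)
                        break;
                ptep++;
                /* step the PFN field to the next page frame */
                pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
        }
}
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
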
@@ -108,7 +107,7 @@ struct vm_area_struct;
#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
-#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
+#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW)
#define _PAGE_S(x) _PAGE_NORMAL(x)
/*
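
The two versions of _PAGE_P(), which marks private pages fault-on-write (alpha's form of write protection for copy-on-write), are bit-for-bit identical: when x already contains _PAGE_FOW the old conditional contributed 0 and the new unconditional OR is a no-op; when x lacks the bit, both forms OR it in. The ?: was dead weight.
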
@@ -127,34 +126,11 @@ struct vm_area_struct;
#define pgprot_noncached(prot) (prot)
/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern pte_t __bad_page(void);
-extern pmd_t * __bad_pagetable(void);
-
-extern unsigned long __zero_page(void);
-
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
- ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
/*
* On certain platforms whose physical address space can overlap KSEG,
* namely EV6 and above, we must re-twiddle the physaddr to restore the
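
The deleted BAD_PAGE/BAD_PAGETABLE helpers and the BITS_PER_PTR/PTR_MASK/SIZEOF_PTR_LOG2/PAGE_PTR pointer arithmetic are relics of the original port; nothing in the tree appears to have used them for a long time, so they are removed outright with no replacement.
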
@@ -189,16 +165,10 @@ extern unsigned long __zero_page(void);
* and a page entry and page directory to the page they refer to.
*/
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define pte_pfn(pte) (pte_val(pte) >> 32)
+#define PFN_PTE_SHIFT 32
+#define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- \
- pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
- pte; \
-})
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
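
mk_pte() can be dropped from the architecture once pte_pfn()/pfn_pte() and PFN_PTE_SHIFT fully describe the PTE layout, since a common definition can then live in generic code instead of being repeated per architecture. A sketch of what that shared helper amounts to (not alpha-specific code):

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        /* compose a PTE from the page's frame number plus protection bits */
        return pfn_pte(page_to_pfn(page), pgprot);
}

Note that this routes page-based PTE construction through pfn_pte(), and therefore through PHYS_TWIDDLE(), which the open-coded macro skipped; on most configurations PHYS_TWIDDLE() is the identity, per the KSEG comment above.
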
@@ -256,7 +226,7 @@ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
+extern inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
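
The pte_mkwrite() -> pte_mkwrite_novma() rename mirrors the tree-wide change that gave pte_mkwrite() a VMA argument (some architectures, notably x86 with shadow stacks, need the VMA to decide what "writable" means). Architectures that do not care, like alpha, keep the old body under the _novma name, and a generic wrapper of roughly this shape restores the familiar interface:

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
        /* no VMA-dependent policy here; defer to the arch helper */
        return pte_mkwrite_novma(pte);
}
#endif
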
@@ -303,6 +273,12 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
{
}
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+}
+
/*
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
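
The new update_mmu_cache_range() above is the batched counterpart of update_mmu_cache(): after set_ptes() installs nr consecutive PTEs for a folio, generic code makes a single call covering the whole range instead of one call per page. Alpha needs no cache maintenance here, hence the empty body; in the generic header the single-page hook is expressed in terms of the range hook, roughly:

#define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)
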
@@ -328,7 +304,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
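
Returning bool from pte_swp_exclusive() makes the predicate's intent explicit and sidesteps any narrowing: the function returns a masked 64-bit pte_val(), and conversion to bool tests for non-zero, whereas an int return would silently drop the flag if it ever sat above bit 31.
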
@@ -354,7 +330,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
extern void paging_init(void);
-/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
+/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */