Diffstat (limited to 'arch/sparc/include/asm/pgtable_32.h')
 arch/sparc/include/asm/pgtable_32.h | 45 +++++++++++++++++++--------------------------
 1 file changed, 19 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index d4330e3c57a6..a9f802d1dd64 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -21,7 +21,7 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm-generic/pgtable-nopud.h>
#include <linux/spinlock.h>
@@ -101,8 +101,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
static inline int srmmu_device_memory(unsigned long x)
{
return ((x & 0xF0000000) != 0);
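
With the arch-specific set_pte_at() define gone, sparc32 falls back to the generic definition in include/linux/pgtable.h, which since the set_ptes() conversion is a per-PTE loop over set_pte(). A simplified sketch of that generic fallback (page-table-check and lazy-MMU hooks omitted), stepping the pfn via the PFN_PTE_SHIFT constant this patch introduces further down:

        static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                        pte_t *ptep, pte_t pte, unsigned int nr)
        {
                for (;;) {
                        set_pte(ptep, pte);
                        if (--nr == 0)
                                break;
                        ptep++;
                        /* advance to the next pfn; newer kernels use pte_advance_pfn() */
                        pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
                }
        }
        #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
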
@@ -241,7 +239,7 @@ static inline pte_t pte_mkold(pte_t pte)
return __pte(pte_val(pte) & ~SRMMU_REF);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
return __pte(pte_val(pte) | SRMMU_WRITE);
}
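
The pte_mkwrite() to pte_mkwrite_novma() rename follows the tree-wide change that gave pte_mkwrite() a VMA argument; architectures such as sparc that do not vary write permissions per VMA implement only the _novma variant, and the generic wrapper in include/linux/pgtable.h reduces to roughly:

        static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
        {
                return pte_mkwrite_novma(pte);
        }
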
@@ -256,7 +254,12 @@ static inline pte_t pte_mkyoung(pte_t pte)
return __pte(pte_val(pte) | SRMMU_REF);
}
-#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
+#define PFN_PTE_SHIFT (PAGE_SHIFT - 4)
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ return __pte((pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot));
+}
static inline unsigned long pte_pfn(pte_t pte)
{
@@ -268,20 +271,11 @@ static inline unsigned long pte_pfn(pte_t pte)
*/
return ~0UL;
}
- return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
+ return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
}
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
- return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
-}
-
static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
return __pte(((page) >> 4) | pgprot_val(pgprot));
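
On SRMMU the PTE stores the physical address shifted right by 4 bits, so with the 4 KB pages sparc32 uses a pfn sits at PAGE_SHIFT - 4 = 8, and pfn_pte()/pte_pfn() are now exact inverses over SRMMU_PTE_PMASK. The open-coded mk_pte() can go because generic code supplies it in terms of pfn_pte(). A hypothetical round trip (the pfn value and PAGE_KERNEL protection are chosen purely for illustration):

        /* pfn 0x12345 -> PTE bits 0x1234500 | prot; masking with SRMMU_PTE_PMASK
         * and shifting back down by PFN_PTE_SHIFT recovers 0x12345.
         */
        pte_t pte = pfn_pte(0x12345UL, PAGE_KERNEL);
        BUG_ON(pte_pfn(pte) != 0x12345UL);
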
@@ -318,6 +312,7 @@ void mmu_info(struct seq_file *m);
#define FAULT_CODE_USER 0x4
#define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)
void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
unsigned long xva, unsigned int len);
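
update_mmu_cache_range() is the batched companion to set_ptes(): generic mm code calls the hook once per folio batch rather than once per page, and sparc32 needs no per-PTE work in either case, hence the empty stub. Roughly how the generic fault path drives it (a sketch, not the exact mm/memory.c code):

        set_ptes(vma->vm_mm, addr, ptep, pte, nr);
        update_mmu_cache_range(vmf, vma, addr, ptep, nr);
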
@@ -353,7 +348,7 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
}
@@ -400,12 +395,8 @@ __get_iospace (unsigned long addr)
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
-int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
- unsigned long, pgprot_t);
-
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long from, unsigned long pfn,
- unsigned long size, pgprot_t prot)
+static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
+ unsigned long size)
{
unsigned long long offset, space, phys_base;
@@ -413,25 +404,27 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
space = GET_IOSPACE(pfn);
phys_base = offset | (space << 32ULL);
- return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+ return phys_base >> PAGE_SHIFT;
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range_pfn io_remap_pfn_range_pfn
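
sparc32 encodes the 4-bit I/O space number in the top bits of the pfn, so the helper now only translates that encoded pfn into a real physical pfn (offset | space << 32, shifted down by PAGE_SHIFT), leaving the mapping itself to common code. A sketch of the presumed common-code consumer of the new hook (an assumption; the generic side is not shown in this diff):

        /* Assumed shape of the common wrapper built on io_remap_pfn_range_pfn(). */
        static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long pfn,
                        unsigned long size, pgprot_t prot)
        {
                return remap_pfn_range(vma, addr,
                                       io_remap_pfn_range_pfn(pfn, size),
                                       size, prot);
        }
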
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} \
__changed; \
})
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#define VMALLOC_START _AC(0xfe600000,UL)
#define VMALLOC_END _AC(0xffc00000,UL)
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA