Diffstat (limited to 'arch/sparc/include/asm/pgtable_64.h')
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 389
1 file changed, 215 insertions, 174 deletions
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1393a8ac596b..615f460c50af 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -13,7 +13,7 @@
* the SpitFire page tables.
*/
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
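The switch from <asm-generic/5level-fixup.h> to <asm-generic/pgtable-nop4d.h> folds the p4d level into the pgd, so sparc64 keeps its four-level walk underneath the generic five-level API. A minimal sketch of what the nop4d header provides (simplified from include/asm-generic/pgtable-nop4d.h):

typedef struct { pgd_t pgd; } p4d_t;

#define PTRS_PER_P4D	1

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;	/* one p4d per pgd: no extra indirection */
}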
@@ -79,13 +79,14 @@
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long VMALLOC_END;
#define vmemmap ((struct page *)VMEMMAP_BASE)
#include <linux/sched.h>
+#include <asm/tlbflush.h>
bool kern_addr_valid(unsigned long addr);
@@ -95,9 +96,6 @@ bool kern_addr_valid(unsigned long addr);
#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
-/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0UL
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
@@ -108,7 +106,7 @@ bool kern_addr_valid(unsigned long addr);
pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
@@ -190,26 +188,10 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
-/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
-#ifndef __ASSEMBLY__
+/* We borrow bit 20 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _AC(0x0000000000100000, UL)
+
+#ifndef __ASSEMBLER__
pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
@@ -231,36 +213,6 @@ extern unsigned long _PAGE_ALL_SZ_BITS;
extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr) (mem_map_zero)
-/* This macro must be updated when the size of struct page grows above 80
- * or reduces below 64.
- * The idea that compiler optimizes out switch() statement, and only
- * leaves clrx instructions
- */
-#define mm_zero_struct_page(pp) do { \
- unsigned long *_pp = (void *)(pp); \
- \
- /* Check that struct page is either 64, 72, or 80 bytes */ \
- BUILD_BUG_ON(sizeof(struct page) & 7); \
- BUILD_BUG_ON(sizeof(struct page) < 64); \
- BUILD_BUG_ON(sizeof(struct page) > 80); \
- \
- switch (sizeof(struct page)) { \
- case 80: \
- _pp[9] = 0; /* fallthrough */ \
- case 72: \
- _pp[8] = 0; /* fallthrough */ \
- default: \
- _pp[7] = 0; \
- _pp[6] = 0; \
- _pp[5] = 0; \
- _pp[4] = 0; \
- _pp[3] = 0; \
- _pp[2] = 0; \
- _pp[1] = 0; \
- _pp[0] = 0; \
- } \
-} while (0)
-
/* PFNs are real physical page numbers. However, mem_map only begins to record
* per-page information starting at pfn_base. This is to handle systems where
* the first physical page in the machine is at some huge physical address,
@@ -273,7 +225,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
return __pte(paddr | pgprot_val(prot));
}
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
@@ -282,7 +233,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
return __pmd(pte_val(pte));
}
-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#endif
/* This one can be done with two shifts. */
@@ -406,9 +356,44 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
*/
#define pgprot_noncached pgprot_noncached
+static inline unsigned long pte_dirty(pte_t pte)
+{
+ unsigned long mask;
+
+ __asm__ __volatile__(
+ "\n661: mov %1, %0\n"
+ " nop\n"
+ " .section .sun4v_2insn_patch, \"ax\"\n"
+ " .word 661b\n"
+ " sethi %%uhi(%2), %0\n"
+ " sllx %0, 32, %0\n"
+ " .previous\n"
+ : "=r" (mask)
+ : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
+
+ return (pte_val(pte) & mask);
+}
+
+static inline unsigned long pte_write(pte_t pte)
+{
+ unsigned long mask;
+
+ __asm__ __volatile__(
+ "\n661: mov %1, %0\n"
+ " nop\n"
+ " .section .sun4v_2insn_patch, \"ax\"\n"
+ " .word 661b\n"
+ " sethi %%uhi(%2), %0\n"
+ " sllx %0, 32, %0\n"
+ " .previous\n"
+ : "=r" (mask)
+ : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
+
+ return (pte_val(pte) & mask);
+}
+
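Both helpers rely on sparc64's boot-time instruction patching: the sun4u mask load is assembled inline, while the .sun4v_2insn_patch section records the site address (".word 661b") together with the sethi/sllx pair that builds the 64-bit sun4v mask; when the kernel finds itself running on the hypervisor, it rewrites the two instructions in place. A hedged sketch of that mechanism (the entry layout mirrors what the asm emits; patch_2insn_sites is a hypothetical name, the real walker being sun4v_patch_2insn_range() in arch/sparc/kernel/setup_64.c):

struct sun4v_2insn_patch_entry {
	unsigned int addr;	/* ".word 661b": address of the patch site */
	unsigned int insns[2];	/* the two sun4v replacement instructions */
};

static void patch_2insn_sites(struct sun4v_2insn_patch_entry *p,
			      struct sun4v_2insn_patch_entry *end)
{
	for (; p < end; p++) {
		unsigned int *site = (unsigned int *)(unsigned long)p->addr;

		site[0] = p->insns[0];	/* overwrites "mov %1, %0" */
		site[1] = p->insns[1];	/* overwrites the "nop" */
		/* the real patcher also flushes the I-cache per word */
	}
}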
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
@@ -468,28 +453,43 @@ static inline bool is_hugetlb_pte(pte_t pte)
}
#endif
+static inline pte_t __pte_mkhwwrite(pte_t pte)
+{
+ unsigned long val = pte_val(pte);
+
+ /*
+ * Note: we only want to set the HW writable bit if the SW writable bit
+ * and the SW dirty bit are set.
+ */
+ __asm__ __volatile__(
+ "\n661: or %0, %2, %0\n"
+ " .section .sun4v_1insn_patch, \"ax\"\n"
+ " .word 661b\n"
+ " or %0, %3, %0\n"
+ " .previous\n"
+ : "=r" (val)
+ : "0" (val), "i" (_PAGE_W_4U), "i" (_PAGE_W_4V));
+
+ return __pte(val);
+}
+
static inline pte_t pte_mkdirty(pte_t pte)
{
- unsigned long val = pte_val(pte), tmp;
+ unsigned long val = pte_val(pte), mask;
__asm__ __volatile__(
- "\n661: or %0, %3, %0\n"
- " nop\n"
- "\n662: nop\n"
+ "\n661: mov %1, %0\n"
" nop\n"
" .section .sun4v_2insn_patch, \"ax\"\n"
" .word 661b\n"
- " sethi %%uhi(%4), %1\n"
- " sllx %1, 32, %1\n"
- " .word 662b\n"
- " or %1, %%lo(%4), %1\n"
- " or %0, %1, %0\n"
+ " sethi %%uhi(%2), %0\n"
+ " sllx %0, 32, %0\n"
" .previous\n"
- : "=r" (val), "=r" (tmp)
- : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
- "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
+ : "=r" (mask)
+ : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
- return __pte(val);
+ pte = __pte(val | mask);
+ return pte_write(pte) ? __pte_mkhwwrite(pte) : pte;
}
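Taken together with __pte_mkhwwrite() above, the rewritten helpers keep dirty and write tracking in software bits (_PAGE_MODIFIED, _PAGE_WRITE) and set the hardware write-enable bit (_PAGE_W) only once both are present. An illustrative sequence (some_pte is assumed to be any present user PTE):

pte_t pte = pte_wrprotect(pte_mkclean(some_pte)); /* SW bits clear, no _PAGE_W */
pte = pte_mkwrite_novma(pte);	/* SW-writable but clean: still no _PAGE_W */
pte = pte_mkdirty(pte);		/* SW-dirty too: __pte_mkhwwrite() sets _PAGE_W */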
static inline pte_t pte_mkclean(pte_t pte)
@@ -516,7 +516,7 @@ static inline pte_t pte_mkclean(pte_t pte)
return __pte(val);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
{
unsigned long val = pte_val(pte), mask;
@@ -531,7 +531,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
: "=r" (mask)
: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
- return __pte(val | mask);
+ pte = __pte(val | mask);
+ return pte_dirty(pte) ? __pte_mkhwwrite(pte) : pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
@@ -634,42 +635,6 @@ static inline unsigned long pte_young(pte_t pte)
return (pte_val(pte) & mask);
}
-static inline unsigned long pte_dirty(pte_t pte)
-{
- unsigned long mask;
-
- __asm__ __volatile__(
- "\n661: mov %1, %0\n"
- " nop\n"
- " .section .sun4v_2insn_patch, \"ax\"\n"
- " .word 661b\n"
- " sethi %%uhi(%2), %0\n"
- " sllx %0, 32, %0\n"
- " .previous\n"
- : "=r" (mask)
- : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
-
- return (pte_val(pte) & mask);
-}
-
-static inline unsigned long pte_write(pte_t pte)
-{
- unsigned long mask;
-
- __asm__ __volatile__(
- "\n661: mov %1, %0\n"
- " nop\n"
- " .section .sun4v_2insn_patch, \"ax\"\n"
- " .word 661b\n"
- " sethi %%uhi(%2), %0\n"
- " sllx %0, 32, %0\n"
- " .previous\n"
- : "=r" (mask)
- : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
-
- return (pte_val(pte) & mask);
-}
-
static inline unsigned long pte_exec(pte_t pte)
{
unsigned long mask;
@@ -713,7 +678,8 @@ static inline unsigned long pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
-static inline unsigned long pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -738,6 +704,7 @@ static inline unsigned long pmd_write(pmd_t pmd)
#define pud_write(pud) pte_write(__pte(pud_val(pud)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_dirty pmd_dirty
static inline unsigned long pmd_dirty(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -745,6 +712,7 @@ static inline unsigned long pmd_dirty(pmd_t pmd)
return pte_dirty(pte);
}
+#define pmd_young pmd_young
static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -804,15 +772,16 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return __pmd(pte_val(pte));
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite_novma(pte);
return __pmd(pte_val(pte));
}
+#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t entry)
{
unsigned long val = pmd_val(entry);
@@ -840,9 +809,9 @@ static inline int pmd_present(pmd_t pmd)
#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
-#define pgd_none(pgd) (!pgd_val(pgd))
+#define p4d_none(p4d) (!p4d_val(p4d))
-#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
+#define p4d_bad(p4d) (p4d_val(p4d) & ~PAGE_MASK)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -864,7 +833,7 @@ static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
#define pud_set(pudp, pmdp) \
(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
-static inline unsigned long __pmd_page(pmd_t pmd)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
unsigned long pfn;
@@ -874,33 +843,38 @@ static inline unsigned long __pmd_page(pmd_t pmd)
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
-static inline unsigned long pud_page_vaddr(pud_t pud)
+static inline pmd_t *pud_pgtable(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
unsigned long pfn;
pfn = pte_pfn(pte);
- return ((unsigned long) __va(pfn << PAGE_SHIFT));
+ return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}
-#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
+#define pud_page(pud) virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-#define pgd_page_vaddr(pgd) \
- ((unsigned long) __va(pgd_val(pgd)))
-#define pgd_present(pgd) (pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define p4d_pgtable(p4d) \
+ ((pud_t *) __va(p4d_val(p4d)))
+#define p4d_present(p4d) (p4d_val(p4d) != 0U)
+#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
+
+/* only used by the stubbed out hugetlb gup code, should never be called */
+#define p4d_page(p4d) NULL
-static inline unsigned long pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
return pte_val(pte) & _PAGE_PMD_HUGE;
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
@@ -911,33 +885,8 @@ static inline unsigned long pud_pfn(pud_t pud)
/* Same in both SUN4V and SUN4U. */
#define pte_none(pte) (!pte_val(pte))
-#define pgd_set(pgdp, pudp) \
- (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level page table.. */
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgdp, address) \
- ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp, address) \
- ((pmd_t *) pud_page_vaddr(*(pudp)) + \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(dir, address) \
- ((pte_t *) __pmd_page(*(dir)) + \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_kernel pte_index
-#define pte_offset_map pte_index
-#define pte_unmap(pte) do { } while (0)
+#define p4d_set(p4dp, pudp) \
+ (p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;
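The open-coded walkers are removed because include/linux/pgtable.h now derives them from the accessors this file still defines (p4d_pgtable(), pud_pgtable(), pmd_page_vaddr()). A sketch of a full kernel-address walk after this change, assuming a valid, non-huge mapping at addr:

pgd_t *pgd = pgd_offset_k(addr);		/* generic, linux/pgtable.h */
p4d_t *p4d = p4d_offset(pgd, addr);		/* folded level: stays in the pgd */
pud_t *pud = pud_offset(p4d, addr);		/* walks via p4d_pgtable() */
pmd_t *pmd = pmd_offset(pud, addr);		/* walks via pud_pgtable() */
pte_t *pte = pte_offset_kernel(pmd, addr);	/* walks via pmd_page_vaddr() */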
@@ -980,8 +929,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}
-#define set_pte_at(mm,addr,ptep,pte) \
- __set_pte_at((mm), (addr), (ptep), (pte), 0)
+#define PFN_PTE_SHIFT PAGE_SHIFT
+
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ for (;;) {
+ __set_pte_at(mm, addr, ptep, pte, 0);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pte) += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ }
+}
+#define set_ptes set_ptes
#define pte_clear(mm,addr,ptep) \
set_pte_at((mm), (addr), (ptep), __pte(0UL))
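set_ptes() batches the common case of mapping physically contiguous pages: each loop step adds PAGE_SIZE to the raw PTE value, and because a sparc64 PTE stores the physical address directly (PFN_PTE_SHIFT == PAGE_SHIFT), that advances the pfn by exactly one. A hedged usage sketch (mm, addr, ptep and pfn are assumed):

pte_t first = pfn_pte(pfn, PAGE_KERNEL);
set_ptes(mm, addr, ptep, first, 4);	/* maps pfn, pfn+1, pfn+2, pfn+3 */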
@@ -992,7 +954,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) \
+#define move_pte(pte, old_addr, new_addr) \
({ \
pte_t newpte = (pte); \
if (tlb_type != hypervisor && pte_present(pte)) { \
@@ -1000,8 +962,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
\
if (pfn_valid(this_pfn) && \
(((old_addr) ^ (new_addr)) & (1 << 13))) \
- flush_dcache_page_all(current->mm, \
- pfn_to_page(this_pfn)); \
+ flush_dcache_folio_all(current->mm, \
+ page_folio(pfn_to_page(this_pfn))); \
} \
newpte; \
})
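move_pte() is the mremap() hook for D-cache aliasing: the UltraSPARC L1 D-cache is virtually indexed, and with 8K pages bit 13 is the one index bit above the page offset, so moving a page between addresses that disagree in bit 13 changes its cache color and the old lines must be flushed (sun4v hardware copes with aliases, hence the tlb_type != hypervisor guard). The test restated as a hypothetical helper:

static inline bool dcache_color_differs(unsigned long a, unsigned long b)
{
	return ((a ^ b) & (1UL << 13)) != 0;	/* different D-cache color */
}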
@@ -1016,7 +978,10 @@ struct seq_file;
void mmu_info(struct seq_file *);
struct vm_area_struct;
-void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
@@ -1033,18 +998,46 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
-/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * <--------------------------- offset ---------------------------
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * --------------------> E <-- type ---> <------- zeroes -------->
+ */
+#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0x7fUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset) \
( (swp_entry_t) \
{ \
- (((long)(type) << PAGE_SHIFT) | \
+ ((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \
((long)(offset) << (PAGE_SHIFT + 8UL))) \
} )
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
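The type field shrinks from 0xff to 0x7f so that bit 20, directly above the seven type bits at 19..13, is freed for the exclusive marker E. A worked example using sparc64's PAGE_SHIFT of 13:

swp_entry_t e = __swp_entry(5, 0x1234);
/* e.val == (5UL << 13) | (0x1234UL << 21); bit 20 (E) stays clear */
unsigned long type   = __swp_type(e);	/* (e.val >> 13) & 0x7f == 5 */
unsigned long offset = __swp_offset(e);	/* e.val >> 21 == 0x1234 */
pte_t pte = pte_swp_mkexclusive(__swp_entry_to_pte(e));	/* sets bit 20 */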
int page_in_phys_avail(unsigned long paddr);
/*
@@ -1055,9 +1048,6 @@ int page_in_phys_avail(unsigned long paddr);
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
-int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
- unsigned long, pgprot_t);
-
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte);
@@ -1091,9 +1081,8 @@ static inline int arch_unmap_one(struct mm_struct *mm,
return 0;
}
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long from, unsigned long pfn,
- unsigned long size, pgprot_t prot)
+static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
+ unsigned long size)
{
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
int space = GET_IOSPACE(pfn);
@@ -1101,12 +1090,50 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
phys_base = offset | (((unsigned long) space) << 32UL);
- return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+ return phys_base >> PAGE_SHIFT;
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range_pfn io_remap_pfn_range_pfn
-#include <asm/tlbflush.h>
-#include <asm-generic/pgtable.h>
+static inline unsigned long __untagged_addr(unsigned long start)
+{
+ if (adi_capable()) {
+ long addr = start;
+
+ /* If userspace has passed a versioned address, kernel
+ * will not find it in the VMAs since it does not store
+ * the version tags in the list of VMAs. Storing version
+ * tags in list of VMAs is impractical since they can be
+ * changed any time from userspace without dropping into
+ * kernel. Any address search in VMAs will be done with
+ * non-versioned addresses. Ensure the ADI version bits
+ * are dropped here by sign extending the last bit before
+ * ADI bits. IOMMU does not implement version tags.
+ */
+ return (addr << (long)adi_nbits()) >> (long)adi_nbits();
+ }
+
+ return start;
+}
+#define untagged_addr(addr) \
+ ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
+
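A worked example of the sign-extension trick, assuming adi_nbits() == 4 (the ADI version-tag width on SPARC M7/M8):

unsigned long tagged = 0xa000000010000000UL;	/* version 0xa in bits 63..60 */
unsigned long clean  = untagged_addr(tagged);
/* tagged << 4 == 0x0000000100000000UL; the arithmetic >> 4 then
 * replicates bit 59 (zero here), so clean == 0x0000000010000000UL */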
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot;
+
+ if (tlb_type == hypervisor) {
+ prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
+ if (write)
+ prot |= _PAGE_WRITE_4V;
+ } else {
+ prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
+ if (write)
+ prot |= _PAGE_WRITE_4U;
+ }
+
+ return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+#define pte_access_permitted pte_access_permitted
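pte_access_permitted() lets lockless paths such as fast GUP validate a PTE without consulting the VMA. Folding _PAGE_SPECIAL into the tested mask, but not into the expected value, makes every special mapping fail regardless of its protection bits:

pte_t spec = pte_mkspecial(pte);		/* pte: any present PTE, illustrative */
bool ok = pte_access_permitted(spec, false);	/* always false once _PAGE_SPECIAL is set */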
/* We provide our own get_unmapped_area to cope with VA holes and
* SHM area cache aliasing for userland.
@@ -1122,7 +1149,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
-void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
@@ -1132,6 +1158,21 @@ extern unsigned long cmdline_memory_size;
asmlinkage void do_sparc64_fault(struct pt_regs *regs);
-#endif /* !(__ASSEMBLY__) */
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define pud_leaf_size pud_leaf_size
+extern unsigned long pud_leaf_size(pud_t pud);
+
+#define pmd_leaf_size pmd_leaf_size
+extern unsigned long pmd_leaf_size(pmd_t pmd);
+
+#define pte_leaf_size pte_leaf_size
+extern unsigned long pte_leaf_size(pte_t pte);
+
+#endif /* CONFIG_HUGETLB_PAGE */
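sparc64 encodes a mapping's hardware page size in the TTE itself rather than implying it from the table level, so generic consumers of leaf sizes (for example perf's page-size sampling) need these hooks to report the real size. A hedged usage sketch (pte is assumed):

unsigned long sz = pte_leaf_size(pte);	/* e.g. a 64K hugetlb TTE reports
					 * 64K here, not PAGE_SIZE */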
+
+#endif /* !(__ASSEMBLER__) */
#endif /* !(_SPARC64_PGTABLE_H) */