Diffstat (limited to 'arch/mips/include/asm/pgtable.h')

 -rw-r--r--  arch/mips/include/asm/pgtable.h | 220
 1 file changed, 117 insertions(+), 103 deletions(-)
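A note on the first hunk: the __P000..__P111 and __S000..__S111 dummy tables existed only to pre-fill the generic protection map in mm/mmap.c, with the real values generated at runtime; after this change PAGE_SHARED is simply looked up through vm_get_page_prot() from the VM_READ/VM_WRITE/VM_SHARED flags. Below is a minimal userspace sketch of that lookup, assuming a simplified 16-entry map; every name and constant in it is illustrative, not the kernel's.

/*
 * Userspace model of vm_get_page_prot(): the low vm_flags bits index a
 * 16-entry protection map.  All names and values here are illustrative;
 * the real MIPS entries are generated at boot from _page_cachable_default.
 */
#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_SHARED	0x8UL

typedef unsigned long pgprot_model_t;

static const pgprot_model_t protection_map_model[16] = {
	[VM_READ]				= 0x11,	/* private read-only */
	[VM_READ | VM_WRITE]			= 0x13,	/* private: COW on write */
	[VM_SHARED | VM_READ]			= 0x21,
	[VM_SHARED | VM_READ | VM_WRITE]	= 0x23,	/* PAGE_SHARED's slot */
	/* remaining combinations left zero for brevity */
};

static pgprot_model_t vm_get_page_prot_model(unsigned long vm_flags)
{
	return protection_map_model[vm_flags & 0xf];
}

int main(void)
{
	/* PAGE_SHARED == vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED) */
	printf("PAGE_SHARED -> %#lx\n",
	       vm_get_page_prot_model(VM_READ | VM_WRITE | VM_SHARED));
	return 0;
}

The point is only that the protection bits come from a single table indexed by vm_flags, rather than from hand-composed __pgprot() macros.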
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index dd7a0f552cac..9c06a612d33a 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -25,20 +25,12 @@ struct mm_struct;
 struct vm_area_struct;
 
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-				 _page_cachable_default)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
-				 _page_cachable_default)
-#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
-				 _page_cachable_default)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
-				 _page_cachable_default)
+#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)
+
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _page_cachable_default)
 #define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
-				 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 
@@ -49,29 +41,8 @@ struct vm_area_struct;
  * by reasonable means..
  */
 
-/*
- * Dummy values to fill the table in mmap.c
- * The real values will be generated at runtime
- */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
 extern unsigned long _page_cachable_default;
+extern void __update_cache(unsigned long address, pte_t pte);
 
 /*
  * ZERO_PAGE is a global shared page that is always zero; used
@@ -93,46 +64,47 @@ extern void paging_init(void);
  */
 #define pmd_phys(pmd)	virt_to_phys((void *)pmd_val(pmd))
 
-#define __pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_page(pmd)	__pmd_page(pmd)
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	return pmd_val(pmd) >> PFN_PTE_SHIFT;
+}
+
+#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
+#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #define pmd_page_vaddr(pmd)	pmd_val(pmd)
 
 #define htw_stop()							\
 do {									\
-	unsigned long flags;						\
+	unsigned long __flags;						\
 									\
 	if (cpu_has_htw) {						\
-		local_irq_save(flags);					\
+		local_irq_save(__flags);				\
 		if(!raw_current_cpu_data.htw_seq++) {			\
 			write_c0_pwctl(read_c0_pwctl() &		\
 				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
 			back_to_back_c0_hazard();			\
 		}							\
-		local_irq_restore(flags);				\
+		local_irq_restore(__flags);				\
 	}								\
 } while(0)
 
 #define htw_start()							\
 do {									\
-	unsigned long flags;						\
+	unsigned long __flags;						\
 									\
 	if (cpu_has_htw) {						\
-		local_irq_save(flags);					\
+		local_irq_save(__flags);				\
 		if (!--raw_current_cpu_data.htw_seq) {			\
 			write_c0_pwctl(read_c0_pwctl() |		\
 				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
 			back_to_back_c0_hazard();			\
 		}							\
-		local_irq_restore(flags);				\
+		local_irq_restore(__flags);				\
 	}								\
 } while(0)
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-	pte_t *ptep, pte_t pteval);
-
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 #ifdef CONFIG_XPA
@@ -182,7 +154,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 		null.pte_low = null.pte_high = _PAGE_GLOBAL;
 	}
 
-	set_pte_at(mm, addr, ptep, null);
+	set_pte(ptep, null);
 	htw_start();
 }
 #else
@@ -221,29 +193,41 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 #if !defined(CONFIG_CPU_R3K_TLB)
 	/* Preserve global status for the pair */
 	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
-		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+		set_pte(ptep, __pte(_PAGE_GLOBAL));
 	else
 #endif
-		set_pte_at(mm, addr, ptep, __pte(0));
+		set_pte(ptep, __pte(0));
 	htw_start();
 }
 #endif
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	extern void __update_cache(unsigned long address, pte_t pte);
+	unsigned int i;
+	bool do_sync = false;
 
-	if (!pte_present(pteval))
-		goto cache_sync_done;
+	for (i = 0; i < nr; i++) {
+		if (!pte_present(pte))
+			continue;
+		if (pte_present(ptep[i]) &&
+		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
+			continue;
+		do_sync = true;
+	}
 
-	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
-		goto cache_sync_done;
+	if (do_sync)
+		__update_cache(addr, pte);
 
-	__update_cache(addr, pteval);
-cache_sync_done:
-	set_pte(ptep, pteval);
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+	}
 }
+#define set_ptes set_ptes
 
 /*
  * (pmds are folded into puds so this doesn't get actually called,
@@ -335,7 +319,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
 	if (pte.pte_low & _PAGE_MODIFIED) {
@@ -390,7 +374,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	if (pte_val(pte) & _PAGE_MODIFIED)
@@ -424,6 +408,20 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	pte_val(pte) |= _PAGE_HUGE;
 	return pte;
 }
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+	if (pmd_val(pmd) & _PAGE_HUGE)
+		return pfn_to_page(pmd_pfn(pmd));
+
+	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
@@ -481,7 +479,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 }
 
 static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
-						unsigned long address)
+						unsigned long address,
+						pte_t *ptep)
 {
 }
 
@@ -497,7 +496,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 					pte_t entry, int dirty)
 {
 	if (!pte_same(*ptep, entry))
-		set_pte_at(vma->vm_mm, address, ptep, entry);
+		set_pte(ptep, entry);
 	/*
 	 * update_mmu_cache will unconditionally execute, handling both
 	 * the case that the PTE changed and the spurious fault case.
@@ -505,12 +504,6 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 	return true;
 }
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
 #if defined(CONFIG_XPA)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -540,19 +533,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 }
 #endif
 
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte.pte_low & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte.pte_low |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte.pte_low &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+#else
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+#endif
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);
 
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	__update_tlb(vma, address, pte);
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
+{
+	for (;;) {
+		pte_t pte = *ptep;
+		__update_tlb(vma, address, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		address += PAGE_SIZE;
+	}
 }
+#define update_mmu_cache(vma, address, ptep) \
+	update_mmu_cache_range(NULL, vma, address, ptep, 1)
 
-#define	__HAVE_ARCH_UPDATE_MMU_TLB
-#define	update_mmu_tlb	update_mmu_cache
+#define update_mmu_tlb_range(vma, address, ptep, nr) \
+	update_mmu_cache_range(NULL, vma, address, ptep, nr)
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	unsigned long address, pmd_t *pmdp)
@@ -562,16 +599,13 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	__update_tlb(vma, address, pte);
 }
 
-#define kern_addr_valid(addr)	(1)
-
 /*
  * Allow physical addresses to be fixed up to help 36-bit peripherals.
  */
 #ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
 phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
-		unsigned long pfn, unsigned long size, pgprot_t prot);
-#define io_remap_pfn_range io_remap_pfn_range
+unsigned long io_remap_pfn_range_pfn(unsigned long pfn, unsigned long size);
+#define io_remap_pfn_range_pfn io_remap_pfn_range_pfn
 #else
 #define fixup_bigphys_addr(addr, size)	(addr)
 #endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
@@ -599,19 +633,13 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t pmd);
 
-#define pmd_write pmd_write
-static inline int pmd_write(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_WRITE);
-}
-
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
 	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
 	return pmd;
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	pmd_val(pmd) |= _PAGE_WRITE;
 	if (pmd_val(pmd) & _PAGE_MODIFIED)
@@ -620,6 +648,7 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 	return pmd;
 }
 
+#define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
@@ -640,6 +669,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
 	return pmd;
 }
 
+#define pmd_young pmd_young
 static inline int pmd_young(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
@@ -682,22 +712,6 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
-/* Extern to avoid header file madness */
-extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
-
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	return pmd_val(pmd) >> _PFN_SHIFT;
-}
-
-static inline struct page *pmd_page(pmd_t pmd)
-{
-	if (pmd_trans_huge(pmd))
-		return pfn_to_page(pmd_pfn(pmd));
-
-	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
-}
-
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
 	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
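For readers following the set_pte_at() to set_ptes() conversion above: set_ptes() writes nr consecutive PTEs for one folio, advancing the encoded PFN by one page per entry (the pte_val(pte) + (1UL << PFN_PTE_SHIFT) step) and performing at most one __update_cache() sync for the batch. A standalone model of that batching loop follows, with pte_t and its accessors mocked up; nothing below is kernel code, and the 4 KiB PFN_PTE_SHIFT is an assumption.

/*
 * Userspace model of the set_ptes() batching in this diff.  pte_t and all
 * helpers are mocked; only the loop structure mirrors the kernel change.
 */
#include <assert.h>
#include <stdio.h>

#define PFN_PTE_SHIFT 12	/* assumption: 4 KiB pages, PFN in bits 12+ */

typedef struct { unsigned long val; } pte_t;

static unsigned long pte_val(pte_t pte) { return pte.val; }
static pte_t __pte(unsigned long v) { return (pte_t){ v }; }
static unsigned long pte_pfn(pte_t pte) { return pte.val >> PFN_PTE_SHIFT; }
static void set_pte(pte_t *ptep, pte_t pte) { *ptep = pte; }

/*
 * Mirrors the new MIPS set_ptes(): write nr entries, bumping the PFN by
 * one page each iteration.  As in the kernel helper, nr must be >= 1.
 */
static void set_ptes_model(pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}

int main(void)
{
	pte_t table[4] = { { 0 } };

	set_ptes_model(table, __pte(0x1000UL << PFN_PTE_SHIFT), 4);
	for (unsigned int i = 0; i < 4; i++) {
		assert(pte_pfn(table[i]) == 0x1000 + i);
		printf("pte[%u] pfn=%#lx\n", i, pte_pfn(table[i]));
	}
	return 0;
}

Generic mm code guarantees nr >= 1 at the call site, which is why the kernel loop can decrement before testing.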

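Similarly, update_mmu_cache_range() replaces the single-page update_mmu_cache() with a per-page loop so a whole folio's TLB entries can be refreshed after set_ptes(); update_mmu_cache() becomes the nr == 1 case. A sketch of the loop shape under the same caveats, where the logging __update_tlb_model() stands in for the real __update_tlb():

/*
 * Userspace model of the update_mmu_cache_range() loop: refresh the TLB
 * for nr consecutive PTEs, stepping the address one page at a time.
 * Everything here is illustrative, not kernel code.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB base pages */

typedef struct { unsigned long val; } pte_t;

static void __update_tlb_model(unsigned long address, pte_t pte)
{
	printf("update TLB: va=%#lx pte=%#lx\n", address, pte.val);
}

/* Mirrors the loop in the diff: one __update_tlb() per page, nr >= 1. */
static void update_mmu_cache_range_model(unsigned long address,
					 pte_t *ptep, unsigned int nr)
{
	for (;;) {
		pte_t pte = *ptep;
		__update_tlb_model(address, pte);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}

int main(void)
{
	pte_t ptes[3] = { { 0x1000 }, { 0x2000 }, { 0x3000 } };

	update_mmu_cache_range_model(0x400000UL, ptes, 3);
	return 0;
}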