Diffstat (limited to 'arch/microblaze/include/asm/pgtable.h')
-rw-r--r--	arch/microblaze/include/asm/pgtable.h | 89
1 file changed, 49 insertions(+), 40 deletions(-)
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 42f5988e998b..4eb76de6be4a 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -10,14 +10,14 @@
 
 #include <asm/setup.h>
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 extern int mem_init_done;
 #endif
 
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 
 #include <linux/sched.h>
 #include <linux/threads.h>
@@ -39,7 +39,7 @@ extern pte_t *va_to_pte(unsigned long address);
 #define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
 #define VMALLOC_END	ioremap_bot
 
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 
 /*
  * Macro to mark a page protection value as "uncacheable".
@@ -99,7 +99,6 @@ extern pte_t *va_to_pte(unsigned long address);
 #define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_PGD_NR	0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -131,10 +130,10 @@ extern pte_t *va_to_pte(unsigned long address);
  *   of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
  *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
  *   zone.
- * - PRESENT *must* be in the bottom two bits because swap cache
- *   entries use the top 30 bits. Because 4xx doesn't support SMP
- *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
- *   is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - PRESENT *must* be in the bottom two bits because swap PTEs use the top
+ *   30 bits. Because 4xx doesn't support SMP anyway, M is irrelevant so we
+ *   borrow it for PAGE_PRESENT. Bit 30 is cleared in the TLB miss handler
+ *   before the TLB entry is loaded.
  * - All other bits of the PTE are loaded into TLBLO without
  *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
  *   software PTE bits. We actually use bits 21, 24, 25, and
@@ -155,6 +154,9 @@ extern pte_t *va_to_pte(unsigned long address);
 #define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
 #define _PMD_PRESENT	PAGE_MASK
 
+/* We borrow bit 24 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	_PAGE_DIRTY
+
 /*
  * Some bits are unused...
  */
@@ -205,7 +207,7 @@ extern pte_t *va_to_pte(unsigned long address);
  * Also, write permissions imply read permissions.
  */
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
@@ -213,7 +215,7 @@ extern unsigned long empty_zero_page[1024];
 extern unsigned long empty_zero_page[1024];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
@@ -227,14 +229,14 @@ extern unsigned long empty_zero_page[1024];
 #define pte_page(x)		(mem_map + (unsigned long) \
 				((pte_val(x) - memory_start) >> PAGE_SHIFT))
 
-#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
+#define PFN_PTE_SHIFT		PAGE_SHIFT
 
-#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)
 
 #define pfn_pte(pfn, prot) \
-	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
+	__pte(((pte_basic_t)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -263,7 +265,7 @@ static inline pte_t pte_mkread(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_USER; return pte; }
 static inline pte_t pte_mkexec(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) \
+static inline pte_t pte_mkwrite_novma(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
@@ -282,14 +284,6 @@ static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
 	return pte;
 }
 
-#define mk_pte(page, pgprot) \
-({									\
-	pte_t pte;							\
-	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
-			pgprot_val(pgprot);				\
-	pte;								\
-})
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
@@ -327,19 +321,13 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 /*
  * set_pte stores a linux PTE into the linux page table.
  */
-static inline void set_pte(struct mm_struct *mm, unsigned long addr,
-		pte_t *ptep, pte_t pte)
-{
-	*ptep = pte;
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-		pte_t *ptep, pte_t pte)
+static inline void set_pte(pte_t *ptep, pte_t pte)
 {
 	*ptep = pte;
 }
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+struct vm_area_struct;
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep)
 {
@@ -393,18 +381,39 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 /*
- * Encode and decode a swap entry.
- * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
- * (if used). -- paulus
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *   <------------------ offset -------------------> E < type -> 0 0
+ *
+ *   E is the exclusive marker that is not stored in swap entries.
  */
-#define __swp_type(entry)	((entry).val & 0x3f)
+#define __swp_type(entry)	((entry).val & 0x1f)
 #define __swp_offset(entry)	((entry).val >> 6)
 #define __swp_entry(type, offset) \
-		((swp_entry_t) { (type) | ((offset) << 6) })
+		((swp_entry_t) { ((type) & 0x1f) | ((offset) << 6) })
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
 
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
 extern unsigned long iopa(unsigned long addr);
 
 /* Values for nocacheflag and cmode */
@@ -426,13 +435,13 @@ extern int mem_init_done;
 
 asmlinkage void __init mmu_init(void);
 
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 #endif /* __KERNEL__ */
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 extern unsigned long ioremap_bot, ioremap_base;
 
 void setup_memory(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 
 #endif /* _ASM_MICROBLAZE_PGTABLE_H */
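A quick way to sanity-check the new swap-PTE layout is a standalone model of the macros above. The following is illustration only, not code from the commit: SWP_* are stand-ins for the __swp_* macros, and the _PAGE_PRESENT (0x002) and _PAGE_DIRTY (0x080) values are assumptions copied from this header.

/* swap_pte_model.c - standalone model of the swap-PTE encoding above.
 * Bits 0-1 of the PTE stay zero (so pte_present() is false), the 5-bit
 * type sits in PTE bits 2-6, the exclusive marker E reuses _PAGE_DIRTY
 * (0x080 here, "bit 24" in the big-endian bit numbering the comment
 * uses), and the offset starts at PTE bit 8.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long pte_basic_t;

#define _PAGE_PRESENT		0x002	/* assumed, from this header */
#define _PAGE_DIRTY		0x080	/* assumed, from this header */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_DIRTY

/* mirrors __swp_entry()/__swp_type()/__swp_offset() */
#define SWP_ENTRY(type, offset)	(((type) & 0x1f) | ((offset) << 6))
#define SWP_TYPE(val)		((val) & 0x1f)
#define SWP_OFFSET(val)		((val) >> 6)

/* mirrors __swp_entry_to_pte()/__pte_to_swp_entry(): shifting by 2
 * keeps the two low (PRESENT) bits of the PTE clear */
#define SWP_TO_PTE(val)		((val) << 2)
#define PTE_TO_SWP(pte)		((pte) >> 2)

int main(void)
{
	pte_basic_t entry = SWP_ENTRY(3UL, 0x12345UL);
	pte_basic_t pte = SWP_TO_PTE(entry);

	/* E must overlap neither the present bits nor the type field */
	assert(!(_PAGE_SWP_EXCLUSIVE & (_PAGE_PRESENT | (0x1fUL << 2))));
	assert(!(pte & _PAGE_PRESENT));		/* swap PTEs are !present */

	pte |= _PAGE_SWP_EXCLUSIVE;		/* pte_swp_mkexclusive() */
	assert(pte & _PAGE_SWP_EXCLUSIVE);	/* pte_swp_exclusive() */
	pte &= ~_PAGE_SWP_EXCLUSIVE;		/* pte_swp_clear_exclusive() */

	entry = PTE_TO_SWP(pte);
	printf("type=%lu offset=%#lx\n", SWP_TYPE(entry), SWP_OFFSET(entry));
	return 0;
}

Compiled and run, this prints type=3 offset=0x12345: the round trip preserves both fields, and the asserts document why borrowing bit 24 (a software bit ignored for non-present PTEs) is safe.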
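The PFN_SHIFT_OFFSET to PFN_PTE_SHIFT rename is likewise not cosmetic: PFN_PTE_SHIFT is the name generic mm code expects, letting it step a PTE to the next page frame by itself (the generic set_ptes()/pte_next_pfn() pattern) instead of requiring a per-architecture set_pte_at(). A minimal sketch of that contract, with stand-in types rather than the kernel's:

/* pfn_pte_shift_model.c - simplified sketch, not the kernel sources.
 * Roughly what the generic set_ptes()/pte_next_pfn() pair in
 * include/linux/pgtable.h does; locking, lazy-MMU hooks and the real
 * pte_t definition are omitted.
 */
typedef unsigned long pte_basic_t;
typedef struct { pte_basic_t pte; } pte_t;

#define PAGE_SHIFT	12		/* assumed 4K pages */
#define PFN_PTE_SHIFT	PAGE_SHIFT

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;			/* same body as in the diff */
}

/* PTE for the next page frame, protection bits unchanged */
static inline pte_t pte_next_pfn(pte_t pte)
{
	return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
}

/* map nr consecutive pages: a stripped-down set_ptes() */
static inline void set_ptes(pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
}

Because the PFN sits in the high PTE bits at a known shift, adding 1 << PFN_PTE_SHIFT advances the mapping by one page while leaving the low protection bits untouched; that is the whole contract the rename exposes to generic code.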
