Diffstat (limited to 'arch/hexagon/include/asm/pgtable.h')
-rw-r--r--	arch/hexagon/include/asm/pgtable.h	86
1 file changed, 37 insertions, 49 deletions
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index dbb22b80b8c4..fbf24d1d1ca6 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -61,6 +61,9 @@ extern unsigned long empty_zero_page;
  * So we'll put up with a bit of inefficiency for now...
  */
 
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	(1<<6)
+
 /*
  * Top "FOURTH" level (pgd), which for the Hexagon VM is really
  * only the second from the bottom, pgd and pud both being collapsed.
@@ -126,38 +129,8 @@ extern unsigned long _dflt_cache_att;
  */
 #define CACHEDEF	(CACHE_DEFAULT << 6)
 
-/* Private (copy-on-write) page protections. */
-#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
-#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
-#define __P010 __P000	/* Write-only copy-on-write */
-#define __P011 __P001	/* Read/Write copy-on-write */
-#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
-			_PAGE_READ | CACHEDEF)
-#define __P110 __P100	/* Write/execute copy-on-write */
-#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
-
-/* Shared page protections. */
-#define __S000 __P000
-#define __S001 __P001
-#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __S101 __P101
-#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
 
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-
 /* HUGETLB not working currently */
 #ifdef CONFIG_HUGETLB_PAGE
 #define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -239,10 +212,14 @@ static inline int pmd_bad(pmd_t pmd)
 }
 
 /*
+ * pmd_pfn - converts a PMD entry to a page frame number
+ */
+#define pmd_pfn(pmd)	(pmd_val(pmd) >> PAGE_SHIFT)
+
+/*
  * pmd_page - converts a PMD entry to a page pointer
  */
 #define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /**
  * pte_none - check if pte is mapped
@@ -261,9 +238,6 @@ static inline int pte_present(pte_t pte)
 	return pte_val(pte) & _PAGE_PRESENT;
 }
 
-/* mk_pte - make a PTE out of a page pointer and protection bits */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
 /* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
@@ -323,7 +297,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 }
 
 /* pte_mkwrite - mark page as writable */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	return pte;
@@ -361,6 +335,7 @@ static inline int pte_exec(pte_t pte)
 /* __swp_entry_to_pte - extract PTE from swap entry */
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 /* pfn_pte - convert page number and protection value to page table entry */
 #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
 
@@ -368,14 +343,6 @@ static inline int pte_exec(pte_t pte)
 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
-/*
- * set_pte_at - update page table and do whatever magic may be
- * necessary to make the underlying hardware/firmware take note.
- *
- * VM may require a virtual instruction to alert the MMU.
- */
-#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
 	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
@@ -385,9 +352,12 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
 /*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
  * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
  * interpreted as swap information.  The remaining free bits are interpreted as
- * swap type/offset tuple.  Rather than have the TLB fill handler test
+ * listed below.  Rather than have the TLB fill handler test
  * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
  * all zeros for swap entries, which speeds up the miss handler at the cost of
  * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
@@ -397,9 +367,10 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * Format of swap PTE:
  *	bit	0:	Present (zero)
  *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
- *	bits	6-9:	bits 3:0 of offset
+ *	bit	6:	exclusive marker
+ *	bits	7-9:	bits 2:0 of offset
  *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
- *	bits	13-31:	bits 22:4 of swap offset
+ *	bits	13-31:	bits 21:3 of swap offset
  *
  * The split offset makes some of the following macros a little gnarly,
  * but there's plenty of precedent for this sort of thing.
@@ -409,11 +380,28 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define __swp_type(swp_pte)	(((swp_pte).val >> 1) & 0x1f)
 
 #define __swp_offset(swp_pte) \
-	((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))
 
 #define __swp_entry(type, offset) \
 	((swp_entry_t)	{ \
-		((type << 1) | \
-		 ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
+		(((type & 0x1f) << 1) | \
+		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
+
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
 
 #endif
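
For illustration, a small stand-alone round-trip check of the new swap-PTE layout. The bit manipulation transcribes __swp_entry(), __swp_type(), __swp_offset() and the _PAGE_SWP_EXCLUSIVE helpers from the diff into host-side C; the harness itself (function names, test values) is hypothetical and not part of the patch.

	#include <assert.h>
	#include <stdio.h>

	#define SWP_EXCLUSIVE	(1u << 6)	/* borrowed bit 6, as in the patch */

	/* Encode: bits 1-5 type, bits 7-9 offset 2:0, bits 13-31 offset 21:3. */
	static unsigned int swp_encode(unsigned int type, unsigned int offset)
	{
		return ((type & 0x1f) << 1) |
		       ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7);
	}

	static unsigned int swp_type(unsigned int val)
	{
		return (val >> 1) & 0x1f;
	}

	static unsigned int swp_offset(unsigned int val)
	{
		return ((val >> 7) & 0x7) | ((val >> 10) & 0x3ffff8);
	}

	int main(void)
	{
		unsigned int val = swp_encode(9, 0x12345);

		/* Bit 0 (present) and bits 10-12 (permissions) must stay zero. */
		assert((val & 0x1) == 0 && (val & 0x1c00) == 0);

		/* Type and split offset survive the round trip. */
		assert(swp_type(val) == 9 && swp_offset(val) == 0x12345);

		/* The exclusive marker toggles without disturbing either field. */
		val |= SWP_EXCLUSIVE;
		assert(swp_type(val) == 9 && swp_offset(val) == 0x12345);
		val &= ~SWP_EXCLUSIVE;
		assert(swp_type(val) == 9 && swp_offset(val) == 0x12345);

		printf("swap PTE round trip OK\n");
		return 0;
	}

Because the exclusive marker lives in bit 6, outside both the type field (bits 1-5) and the split offset (bits 7-9 and 13-31), setting or clearing it never perturbs the decoded swap entry, which is what pte_swp_mkexclusive()/pte_swp_clear_exclusive() rely on.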
