Diffstat (limited to 'arch/microblaze/include/asm/pgtable.h')
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 239
1 file changed, 61 insertions(+), 178 deletions(-)
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index e53b8532353c..4eb76de6be4a 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#ifndef _ASM_MICROBLAZE_PGTABLE_H
@@ -13,58 +10,14 @@
#include <asm/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern int mem_init_done;
#endif
-#ifndef CONFIG_MMU
-
-#define pgd_present(pgd) (1) /* pages are always present on non MMU */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *) 0)
-
-#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
-
-#define pgprot_noncached(x) (x)
-
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
-
-#define swapper_pg_dir ((pgd_t *) NULL)
-
-#define pgtable_cache_init() do {} while (0)
-
-#define arch_enter_lazy_cpu_mode() do {} while (0)
-
-#define pgprot_noncached_wc(prot) prot
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
-
-#else /* CONFIG_MMU */
-
-#include <asm-generic/4level-fixup.h>
-
-#define __PAGETABLE_PMD_FOLDED
+#include <asm-generic/pgtable-nopmd.h>
#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <linux/threads.h>
@@ -72,8 +25,6 @@ extern int mem_init_done;
#include <asm/mmu.h>
#include <asm/page.h>
-#define FIRST_USER_ADDRESS 0UL
-
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
@@ -82,17 +33,13 @@ extern pte_t *va_to_pte(unsigned long address);
* Undefined behaviour if not..
*/
-static inline int pte_special(pte_t pte) { return 0; }
-
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
of 32Mb. */
#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END ioremap_bot
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
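
To make the 32 MB comment concrete: with illustrative values of CONFIG_KERNEL_START = 0xc0000000 and CONFIG_LOWMEM_SIZE = 0x02000000 (assumptions here; both are Kconfig options and may differ in a given config), the vmalloc window starts just above the pinned kernel mapping. A minimal userspace sketch:

#include <stdio.h>

/* Illustrative Kconfig values, not defaults taken from the tree. */
#define CONFIG_KERNEL_START	0xc0000000UL
#define CONFIG_LOWMEM_SIZE	0x02000000UL	/* the pinned 32 MB */

#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)

int main(void)
{
	/* Prints 0xc2000000: vmalloc begins right above low memory. */
	printf("VMALLOC_START = 0x%08lx\n", (unsigned long)VMALLOC_START);
	return 0;
}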
/*
* Macro to mark a page protection value as "uncacheable".
@@ -138,13 +85,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*
*/
-/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT PMD_SHIFT
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -157,7 +99,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
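
With the PMD level folded away via <asm-generic/pgtable-nopmd.h>, PGDIR_SHIFT is computed directly from the PTE table geometry instead of going through the removed PMD_SHIFT. A minimal userspace sketch of the arithmetic, assuming the usual 4K-page values (PAGE_SHIFT = 12 and PTE_SHIFT = 10, i.e. 1024 four-byte PTEs per page table; both are assumptions here, the real values come from the kernel headers):

#include <stdio.h>

/* Assumed values for illustration; the kernel gets these elsewhere. */
#define PAGE_SHIFT	12
#define PTE_SHIFT	10

#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)	/* 22 */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)		/* 4 MiB per PGD slot */
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))	/* 1024 entries */

int main(void)
{
	/* 1024 entries x 4 MiB each covers the full 32-bit space. */
	printf("PGDIR_SHIFT=%d, PGDIR_SIZE=%lu MiB, PTRS_PER_PGD=%d\n",
	       PGDIR_SHIFT, PGDIR_SIZE >> 20, PTRS_PER_PGD);
	return 0;
}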
@@ -165,9 +106,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
__FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
- printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
- __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
@@ -192,13 +130,13 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
* of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
* miss handler. Bit 27 is PAGE_USER, thus selecting the correct
* zone.
- * - PRESENT *must* be in the bottom two bits because swap cache
- * entries use the top 30 bits. Because 4xx doesn't support SMP
- * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
- * is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - PRESENT *must* be in the bottom two bits because swap PTEs use the top
+ * 30 bits. Because 4xx doesn't support SMP anyway, M is irrelevant so we
+ * borrow it for PAGE_PRESENT. Bit 30 is cleared in the TLB miss handler
+ * before the TLB entry is loaded.
* - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- * software PTE bits. We actually use use bits 21, 24, 25, and
+ * software PTE bits. We actually use bits 21, 24, 25, and
* 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
* PRESENT.
*/
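
Since the comment above numbers bits MSB-first (PowerPC 4xx style), it is worth checking that bits 21, 24, 25 and 30 really line up with the ACCESSED, DIRTY, RW and PRESENT masks defined in this header. A small sketch; the 0x400/0x080/0x040/0x002 mask values are assumed from the header rather than re-derived:

#include <assert.h>

/* "Bit n" in the comment above is MSB-0 numbering: mask 1 << (31 - n). */
#define MSB0_BIT(n)	(1u << (31 - (n)))

/* Mask values as defined in this header (assumed). */
#define _PAGE_PRESENT	0x002	/* bit 30 */
#define _PAGE_RW	0x040	/* bit 25 */
#define _PAGE_DIRTY	0x080	/* bit 24 */
#define _PAGE_ACCESSED	0x400	/* bit 21 */

int main(void)
{
	assert(MSB0_BIT(21) == _PAGE_ACCESSED);
	assert(MSB0_BIT(24) == _PAGE_DIRTY);	/* also _PAGE_SWP_EXCLUSIVE */
	assert(MSB0_BIT(25) == _PAGE_RW);
	assert(MSB0_BIT(30) == _PAGE_PRESENT);	/* in the bottom two bits */
	return 0;
}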
@@ -216,6 +154,9 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
#define _PMD_PRESENT PAGE_MASK
+/* We borrow bit 24 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
+
/*
* Some bits are unused...
*/
@@ -265,25 +206,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
* We consider execute permission the same as read.
* Also, write permissions imply read permissions.
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
-
-#ifndef __ASSEMBLY__
+
+#ifndef __ASSEMBLER__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -291,7 +215,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
@@ -305,26 +229,14 @@ extern unsigned long empty_zero_page[1024];
#define pte_page(x) (mem_map + (unsigned long) \
((pte_val(x) - memory_start) >> PAGE_SHIFT))
-#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
+#define PFN_PTE_SHIFT PAGE_SHIFT
-#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT)
#define pfn_pte(pfn, prot) \
- __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
-
-#ifndef __ASSEMBLY__
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-#define pgd_clear(xp) do { } while (0)
-#define pgd_page(pgd) \
- ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+ __pte(((pte_basic_t)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
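
The rename from PFN_SHIFT_OFFSET to PFN_PTE_SHIFT matches the name the generic set_ptes()/pte_next_pfn() helpers expect; the encoding itself is untouched. A reduced round-trip sketch, with pte_t and pgprot_t collapsed to plain integers for illustration:

#include <assert.h>

typedef unsigned long pte_basic_t;

#define PFN_PTE_SHIFT	12	/* PAGE_SHIFT with 4K pages (assumed) */

/* Simplified models of the macros above: pfn in the high bits,
 * protection bits below PAGE_SHIFT. */
static pte_basic_t pfn_pte(unsigned long pfn, pte_basic_t prot)
{
	return ((pte_basic_t)pfn << PFN_PTE_SHIFT) | prot;
}

static unsigned long pte_pfn(pte_basic_t pte)
{
	return pte >> PFN_PTE_SHIFT;
}

int main(void)
{
	pte_basic_t pte = pfn_pte(0x1234, 0x0c3);	/* arbitrary prot bits */

	/* The shift strips the protection bits, recovering the pfn. */
	assert(pte_pfn(pte) == 0x1234);
	return 0;
}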
+#ifndef __ASSEMBLER__
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -353,7 +265,7 @@ static inline pte_t pte_mkread(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) \
+static inline pte_t pte_mkwrite_novma(pte_t pte) \
{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) \
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
@@ -372,14 +284,6 @@ static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
return pte;
}
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
- pgprot_val(pgprot); \
- pte; \
-})
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
@@ -417,19 +321,13 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
/*
* set_pte stores a linux PTE into the linux page table.
*/
-static inline void set_pte(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- *ptep = pte;
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+static inline void set_pte(pte_t *ptep, pte_t pte)
{
*ptep = pte;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+struct vm_area_struct;
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
@@ -467,49 +365,55 @@ static inline void ptep_mkdirty(struct mm_struct *mm,
/* Convert pmd entry to page */
/* our pmd entry is an effective address of pte table*/
/* returns effective address of the pmd entry*/
-#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
+}
+
+/* returns pfn of the pmd entry*/
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
/* returns struct *page of the pmd entry*/
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
-{
- return (pmd_t *) dir;
-}
-
/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/*
- * Encode and decode a swap entry.
- * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
- * (if used). -- paulus
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
+ *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *     <------------------ offset -------------------> E < type -> 0 0
+ *
+ * E is the exclusive marker that is not stored in swap entries.
*/
-#define __swp_type(entry) ((entry).val & 0x3f)
+#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 6)
#define __swp_entry(type, offset) \
- ((swp_entry_t) { (type) | ((offset) << 6) })
+ ((swp_entry_t) { ((type) & 0x1f) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
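
Putting the encoding and the new exclusive marker together: because the entry is shifted left by two on its way into the PTE, the bottom two bits (where _PAGE_PRESENT lives) stay clear, and the E bit sits in the gap between the type and offset fields. A userspace model of the macros above; the 0x002 and 0x080 mask values are assumptions, the rest mirrors the definitions in this hunk:

#include <assert.h>

#define _PAGE_PRESENT		0x002
#define _PAGE_SWP_EXCLUSIVE	0x080	/* _PAGE_DIRTY, bit 24 (assumed) */

/* pte_val()/swp_entry_t reduced to plain unsigned longs. */
#define __swp_type(entry)	((entry) & 0x1f)
#define __swp_offset(entry)	((entry) >> 6)
#define __swp_entry(type, off)	(((type) & 0x1f) | ((off) << 6))
#define __pte_to_swp_entry(pte)	((pte) >> 2)
#define __swp_entry_to_pte(x)	((x) << 2)

int main(void)
{
	unsigned long entry = __swp_entry(7, 0x2a);
	unsigned long pte = __swp_entry_to_pte(entry);

	/* A swap PTE can never look present: bits 0-1 are always zero. */
	assert((pte & _PAGE_PRESENT) == 0);

	/* The exclusive marker lands between type and offset (PTE bit 7,
	 * entry bit 5), so setting it disturbs neither field. */
	pte |= _PAGE_SWP_EXCLUSIVE;
	entry = __pte_to_swp_entry(pte);
	assert(__swp_type(entry) == 7);
	assert(__swp_offset(entry) == 0x2a);
	return 0;
}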
extern unsigned long iopa(unsigned long addr);
/* Values for nocacheflag and cmode */
@@ -521,14 +425,6 @@ extern unsigned long iopa(unsigned long addr);
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_NO_COPYBACK 3
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) (1)
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
@@ -539,26 +435,13 @@ extern int mem_init_done;
asmlinkage void __init mmu_init(void);
-void __init *early_get_page(void);
-
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __KERNEL__ */
-#endif /* CONFIG_MMU */
-
-#ifndef __ASSEMBLY__
-#include <asm-generic/pgtable.h>
-
+#ifndef __ASSEMBLER__
extern unsigned long ioremap_bot, ioremap_base;
-void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
-void consistent_free(size_t size, void *vaddr);
-void consistent_sync(void *vaddr, size_t size, int direction);
-void consistent_sync_page(struct page *page, unsigned long offset,
- size_t size, int direction);
-unsigned long consistent_virt_to_pfn(void *vaddr);
-
void setup_memory(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_MICROBLAZE_PGTABLE_H */