path: root/arch/m68k/include/asm/pgtable_mm.h
Diffstat (limited to 'arch/m68k/include/asm/pgtable_mm.h')
-rw-r--r--  arch/m68k/include/asm/pgtable_mm.h | 73
1 file changed, 34 insertions(+), 39 deletions(-)
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 35ed4a9981ae..bba64a9c49ac 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -1,11 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H
-#include <asm-generic/4level-fixup.h>
+
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#include <asm-generic/pgtable-nopmd.h>
+#else
+#include <asm-generic/pgtable-nopud.h>
+#endif
#include <asm/setup.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
#include <linux/sched.h>
#include <linux/threads.h>
@@ -25,14 +31,10 @@
do{ \
*(pteptr) = (pteval); \
} while(0)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#ifdef CONFIG_SUN3
-#define PMD_SHIFT 17
-#else
-#define PMD_SHIFT 22
+#if CONFIG_PGTABLE_LEVELS == 3
+#define PMD_SHIFT 18
#endif
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -54,29 +56,31 @@
*/
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE 16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE 512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#else
-#define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD 8
+#define PTRS_PER_PTE 64
+#define PTRS_PER_PMD 128
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
-#define KMAP_START 0x0DC00000
-#define KMAP_END 0x0E000000
+#define KMAP_START 0x0dc00000
+#define KMAP_END 0x0e000000
#elif defined(CONFIG_COLDFIRE)
#define KMAP_START 0xe0000000
#define KMAP_END 0xf0000000
+#elif defined(CONFIG_VIRT)
+#define KMAP_START 0xdf000000
+#define KMAP_END 0xff000000
#else
#define KMAP_START 0xd0000000
#define KMAP_END 0xf0000000
@@ -89,6 +93,10 @@ extern unsigned long m68k_vmalloc_end;
#elif defined(CONFIG_COLDFIRE)
#define VMALLOC_START 0xd0000000
#define VMALLOC_END 0xe0000000
+#elif defined(CONFIG_VIRT)
+#define VMALLOC_OFFSET PAGE_SIZE
+#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END KMAP_START
#else
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
@@ -111,16 +119,6 @@ extern void *empty_zero_page;
*/
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 2
-
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
/*
@@ -128,14 +126,16 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
* tables contain all the necessary information. The Sun3 does, but
* they are updated on demand.
*/
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
{
}
-#endif /* !__ASSEMBLY__ */
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#define kern_addr_valid(addr) (1)
+#endif /* !__ASSEMBLER__ */
/* MMU-specific headers */
@@ -147,7 +147,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#include <asm/motorola_pgtable.h>
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Macro to mark a page protection value as "uncacheable".
*/
@@ -168,15 +168,10 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
: (prot)))
-#endif /* CONFIG_COLDFIRE */
-#include <asm-generic/pgtable.h>
-#endif /* !__ASSEMBLY__ */
+pgprot_t pgprot_dmacoherent(pgprot_t prot);
+#define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot)
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#define check_pgt_cache() do { } while (0)
+#endif /* CONFIG_COLDFIRE */
+#endif /* !__ASSEMBLER__ */
#endif /* _M68K_PGTABLE_H */
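
For reference, the new non-Sun3/non-ColdFire constants in the hunks above (PMD_SHIFT 18, PTRS_PER_PTE 64, PTRS_PER_PMD 128, PTRS_PER_PGD 128) split a 32-bit virtual address as 7 + 7 + 6 + 12 bits, so 128 x 128 x 64 x 4 KiB covers the full 4 GiB address space. A minimal standalone sketch of that split, not part of the patch, assuming 4 KiB pages and using hypothetical helper names for illustration only:

/* Illustration only: how the 3-level layout above divides a 32-bit address.
 * Assumes PAGE_SHIFT == 12 (4 KiB pages); split() is a made-up helper.
 */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PMD_SHIFT    18                 /* each page table maps 1 << 18 = 256 KiB */
#define PGDIR_SHIFT  25                 /* each pgd entry maps  1 << 25 = 32 MiB  */

#define PTRS_PER_PTE 64                 /* PMD_SHIFT - PAGE_SHIFT  -> 6 bits */
#define PTRS_PER_PMD 128                /* PGDIR_SHIFT - PMD_SHIFT -> 7 bits */
#define PTRS_PER_PGD 128                /* 32 - PGDIR_SHIFT        -> 7 bits */

static void split(unsigned long addr)
{
	unsigned long pgd = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	unsigned long pmd = (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1);
	unsigned long pte = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1);

	printf("%08lx -> pgd %3lu, pmd %3lu, pte %2lu, offset %lu\n",
	       addr, pgd, pmd, pte, addr & ((1UL << PAGE_SHIFT) - 1));
}

int main(void)
{
	split(0xd0000000UL);	/* default KMAP_START in the hunk above */
	split(0x00001234UL);
	return 0;
}

The same arithmetic explains why PMD_SHIFT drops from 22 to 18 in this change: with 64-entry page tables, one second-level table maps only 1 << 18 bytes (256 KiB) instead of 4 MiB.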