Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--  arch/m68k/mm/cache.c    |  32
-rw-r--r--  arch/m68k/mm/fault.c    |  78
-rw-r--r--  arch/m68k/mm/fault.h    |   7
-rw-r--r--  arch/m68k/mm/hwtest.c   |   2
-rw-r--r--  arch/m68k/mm/init.c     |  74
-rw-r--r--  arch/m68k/mm/kmap.c     |  60
-rw-r--r--  arch/m68k/mm/mcfmmu.c   | 143
-rw-r--r--  arch/m68k/mm/memory.c   | 105
-rw-r--r--  arch/m68k/mm/motorola.c | 329
-rw-r--r--  arch/m68k/mm/sun3kmap.c |   7
-rw-r--r--  arch/m68k/mm/sun3mmu.c  |  43
11 files changed, 497 insertions, 383 deletions
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 079e64898e6a..dde978e66f14 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -8,7 +8,7 @@
  */

 #include <linux/module.h>
-#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
 #include <asm/traps.h>


@@ -49,31 +49,14 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 		if (mmusr & MMU_R_040)
 			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
 	} else {
-		unsigned short mmusr;
-		unsigned long *descaddr;
-
-		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-			      "pmove %%psr,%1"
-			      : "=a&" (descaddr), "=m" (mmusr)
-			      : "a" (vaddr), "d" (get_fs().seg));
-		if (mmusr & (MMU_I|MMU_B|MMU_L))
-			return 0;
-		descaddr = phys_to_virt((unsigned long)descaddr);
-		switch (mmusr & MMU_NUM) {
-		case 1:
-			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-		case 2:
-			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-		case 3:
-			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-		}
+		WARN_ON_ONCE(!CPU_IS_040_OR_060);
 	}
 	return 0;
 }

 /* Push n pages at kernel virtual address and clear the icache */
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
+void flush_icache_user_range(unsigned long address, unsigned long endaddr)
 {
 	if (CPU_IS_COLDFIRE) {
 		unsigned long start, end;
@@ -104,9 +87,16 @@ void flush_icache_range(unsigned long address, unsigned long endaddr)
 			      : "di" (FLUSH_I));
 	}
 }
+
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+	set_fc(SUPER_DATA);
+	flush_icache_user_range(address, endaddr);
+	set_fc(USER_DATA);
+}
 EXPORT_SYMBOL(flush_icache_range);

-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
 	if (CPU_IS_COLDFIRE) {
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index e9b1d7585b43..fa3c5f38d989 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -12,10 +12,12 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>

 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/pgalloc.h>
+
+#include "fault.h"

 extern void die_if_kernel(char *, struct pt_regs *, long);

@@ -48,7 +50,7 @@ int send_fault_sig(struct pt_regs *regs)
 		pr_alert("Unable to handle kernel access");
 		pr_cont(" at virtual address %p\n", addr);
 		die_if_kernel("Oops", regs, 0 /*error_code*/);
-		do_exit(SIGKILL);
+		make_task_dead(SIGKILL);
 	}

 	return 1;
@@ -71,7 +73,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;

 	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
 		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
@@ -85,14 +87,14 @@
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);

 	vma = find_vma(mm, address);
 	if (!vma)
 		goto map_err;
-	if (vma->vm_flags & VM_IO)
-		goto acc_err;
 	if (vma->vm_start <= address)
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -105,8 +107,9 @@ retry:
 		if (address + 256 < rdusp())
 			goto map_err;
 	}
-	if (expand_stack(vma, address))
-		goto map_err;
+	vma = expand_stack(mm, address);
+	if (!vma)
+		goto map_err_nosemaphore;

 /*
  * Ok, we have a good vm_area for this memory access, so
@@ -116,7 +119,7 @@ good_area:
 	pr_debug("do_page_fault: good_area\n");
 	switch (error_code & 3) {
 	default:	/* 3: write, present */
-		/* fall through */
+		fallthrough;
 	case 2:		/* write, not present */
 		if (!(vma->vm_flags & VM_WRITE))
 			goto acc_err;
@@ -125,7 +128,7 @@ good_area:
 	case 1:		/* read, present */
 		goto acc_err;
 	case 0:		/* read, not present */
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+		if (unlikely(!vma_is_accessible(vma)))
 			goto acc_err;
 	}

@@ -135,10 +138,17 @@ good_area:
 	 * the fault.
 	 */

-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 	pr_debug("handle_mm_fault returns %x\n", fault);

-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
 		return 0;

 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -151,33 +161,19 @@ good_area:
 			BUG();
 	}

-	/*
-	 * Major/minor page fault accounting is only done on the
-	 * initial attempt. If we go through a retry, it is extremely
-	 * likely that the page will be found in page cache at that point.
-	 */
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
-		if (fault & VM_FAULT_RETRY) {
-			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
-			 * of starvation. */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
-
-			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
-
-			goto retry;
-		}
+	if (fault & VM_FAULT_RETRY) {
+		flags |= FAULT_FLAG_TRIED;
+
+		/*
+		 * No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
+
+		goto retry;
 	}

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return 0;

 /*
@@ -185,7 +181,7 @@ good_area:
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
@@ -203,10 +199,12 @@ bus_err:
 	goto send_sig;

 map_err:
+	mmap_read_unlock(mm);
+map_err_nosemaphore:
 	current->thread.signo = SIGSEGV;
 	current->thread.code = SEGV_MAPERR;
 	current->thread.faddr = address;
-	goto send_sig;
+	return send_fault_sig(regs);

 acc_err:
 	current->thread.signo = SIGSEGV;
@@ -214,6 +212,6 @@ acc_err:
 	current->thread.faddr = address;

 send_sig:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return send_fault_sig(regs);
 }
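A note on the error_code switch in do_page_fault() above: the low two bits of error_code encode the access type (bit 1: write) and whether the translation was present (bit 0). The following standalone sketch decodes those two bits the same way the switch does; it is an illustration with made-up names, not kernel code:

    #include <stdio.h>

    /* Hypothetical decoder mirroring switch (error_code & 3) above:
     * bit 1 set => write access, bit 0 set => page was present.
     */
    static const char *decode_fault(unsigned long error_code)
    {
        switch (error_code & 3) {
        default:    /* 3: write, present  -> e.g. a COW break */
        case 2:     /* write, not present -> needs VM_WRITE   */
            return "write fault";
        case 1:     /* read, present      -> protection violation */
            return "read fault on present page (access error)";
        case 0:     /* read, not present  -> needs an accessible vma */
            return "read fault";
        }
    }

    int main(void)
    {
        for (unsigned long code = 0; code < 4; code++)
            printf("%lu: %s\n", code, decode_fault(code));
        return 0;
    }

This is also why case 1 goes straight to acc_err: a read fault on a page that is already present can only be a permission problem.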
diff --git a/arch/m68k/mm/fault.h b/arch/m68k/mm/fault.h
new file mode 100644
index 000000000000..dab14ef7d4a1
--- /dev/null
+++ b/arch/m68k/mm/fault.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+struct pt_regs;
+
+int do_page_fault(struct pt_regs *regs, unsigned long address,
+		  unsigned long error_code);
+int send_fault_sig(struct pt_regs *regs);
diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
index fe99aa99987e..8ee7a3368688 100644
--- a/arch/m68k/mm/hwtest.c
+++ b/arch/m68k/mm/hwtest.c
@@ -26,6 +26,8 @@

 #include <linux/module.h>

+#include <asm/hwtest.h>
+
 int hwreg_present(volatile void *regp)
 {
 	int ret = 0;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 27c453f4fffe..488411af1b3f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -40,37 +40,12 @@ void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);

-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
-#endif
-
 #ifdef CONFIG_MMU
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
 int m68k_virt_to_node_shift;

-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
 void __init m68k_setup_node(int node)
 {
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-	struct m68k_mem_info *info = m68k_memory + node;
-	int i, end;
-
-	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
-	end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
-	for (; i <= end; i++) {
-		if (pg_data_table[i])
-			pr_warn("overlap at %u for chunk %u\n", i, node);
-		pg_data_table[i] = pg_data_map + node;
-	}
-#endif
 	node_set_online(node);
 }

@@ -89,22 +64,13 @@ void __init paging_init(void)
 	 * page_alloc get different views of the world.
 	 */
 	unsigned long end_mem = memory_end & PAGE_MASK;
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

 	high_memory = (void *) end_mem;

-	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
-	/*
-	 * Set up SFC/DFC registers (user data space).
-	 */
-	set_fs (USER_DS);
-
-	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-	free_area_init(zones_size);
+	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
+	max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
+	free_area_init(max_zone_pfn);
 }

 #endif /* CONFIG_MMU */
@@ -125,27 +91,35 @@ void free_initmem(void)
 static inline void init_pointer_tables(void)
 {
 #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-	int i;
+	int i, j;

 	/* insert pointer tables allocated so far into the tablelist */
-	init_pointer_table((unsigned long)kernel_pg_dir);
+	init_pointer_table(kernel_pg_dir, TABLE_PGD);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
-		pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+		pud_t *pud = (pud_t *)&kernel_pg_dir[i];
+		pmd_t *pmd_dir;

-		if (pud_present(*pud))
-			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
-	}
+		if (!pud_present(*pud))
+			continue;

-	/* insert also pointer table that we used to unmap the zero page */
-	if (zero_pgtable)
-		init_pointer_table((unsigned long)zero_pgtable);
+		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
+		init_pointer_table(pmd_dir, TABLE_PMD);
+
+		for (j = 0; j < PTRS_PER_PMD; j++) {
+			pmd_t *pmd = &pmd_dir[j];
+			pte_t *pte_dir;
+
+			if (!pmd_present(*pmd))
+				continue;
+
+			pte_dir = (pte_t *)pmd_page_vaddr(*pmd);
+			init_pointer_table(pte_dir, TABLE_PTE);
+		}
+	}
 #endif
 }

 void __init mem_init(void)
 {
-	/* this will put all memory onto the freelists */
-	memblock_free_all();
 	init_pointer_tables();
-	mem_init_print_info(NULL);
 }
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 120030ad8dc4..7594a945732b 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -17,15 +17,12 @@
 #include <linux/vmalloc.h>

 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/io.h>
+#include <asm/tlbflush.h>

 #undef DEBUG

-#define PTRTREESIZE	(256*1024)
-
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
  * but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +47,7 @@ static inline void free_io_area(void *addr)

 #else

-#define IO_SIZE		(256*1024)
+#define IO_SIZE		PMD_SIZE

 static struct vm_struct *iolist;

@@ -81,14 +78,13 @@ static void __free_io_area(void *addr, unsigned long size)

 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

 			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
+				pmd_clear(pmd_dir);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
+				continue;
 			} else if (pmd_type == 0)
 				continue;
 		}
@@ -183,6 +179,12 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
 		return (void __iomem *)physaddr;
 	}
 #endif
+#ifdef CONFIG_VIRT
+	if (MACH_IS_VIRT) {
+		if (physaddr >= 0xff000000 && cacheflag == IOMAP_NOCACHE_SER)
+			return (void __iomem *)physaddr;
+	}
+#endif
 #ifdef CONFIG_COLDFIRE
 	if (__cf_internalio(physaddr))
 		return (void __iomem *) physaddr;
@@ -249,7 +251,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)

 	while ((long)size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +265,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)

 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-			physaddr += PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-			size -= PTRTREESIZE;
+			pmd_val(*pmd_dir) = physaddr;
+			physaddr += PMD_SIZE;
+			virtaddr += PMD_SIZE;
+			size -= PMD_SIZE;
 		} else
 #endif
 		{
@@ -297,17 +299,20 @@
 EXPORT_SYMBOL(__ioremap);

 void iounmap(void __iomem *addr)
 {
 #ifdef CONFIG_AMIGA
-	if ((!MACH_IS_AMIGA) ||
-	    (((unsigned long)addr < 0x40000000) ||
-	     ((unsigned long)addr > 0x60000000)))
-			free_io_area((__force void *)addr);
-#else
+	if (MACH_IS_AMIGA &&
+	    ((unsigned long)addr >= 0x40000000) &&
+	    ((unsigned long)addr < 0x60000000))
+		return;
+#endif
+#ifdef CONFIG_VIRT
+	if (MACH_IS_VIRT && (unsigned long)addr >= 0xff000000)
+		return;
+#endif
 #ifdef CONFIG_COLDFIRE
 	if (cf_internalio(addr))
 		return;
 #endif
 	free_io_area((__force void *)addr);
-#endif
 }
 EXPORT_SYMBOL(iounmap);

@@ -367,13 +372,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)

 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			unsigned long pmd = pmd_val(*pmd_dir);

-			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-							 _CACHEMASK040) | cmode;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
+			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
 				continue;
 			}
 		}
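The PTRTREESIZE/IO_SIZE to PMD_SIZE switch above is numerically a no-op on the classic three-level m68k layout: a PTE table holds 64 entries of 4 KiB pages, so one PMD entry covers 256 KiB, and one PGD entry covers 128 such PMD entries, i.e. 32 MiB (the old ROOTTREESIZE in motorola.c). A quick standalone check, with the geometry hard-coded here as an assumption rather than taken from the m68k headers:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed classic m68k three-level layout: 4 KiB pages,
         * 64 PTEs per PTE table, 128 PMDs per pointer table.
         */
        unsigned long page_size  = 1UL << 12;
        unsigned long pmd_size   = 64 * page_size;   /* span of one PMD entry */
        unsigned long pgdir_size = 128 * pmd_size;   /* span of one PGD entry */

        printf("PMD_SIZE   = %lu KiB (old PTRTREESIZE = 256 KiB)\n",
               pmd_size >> 10);
        printf("PGDIR_SIZE = %lu MiB (old ROOTTREESIZE = 32 MiB)\n",
               pgdir_size >> 20);
        return 0;
    }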
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 0ea375607767..19a75029036c 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -17,10 +17,10 @@

 #include <asm/setup.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/mcf_pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>

 #define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

@@ -38,27 +38,19 @@ void __init paging_init(void)
 	pgd_t *pg_dir;
 	pte_t *pg_table;
 	unsigned long address, size;
-	unsigned long next_pgtable, bootmem_end;
-	unsigned long zones_size[MAX_NR_ZONES];
-	enum zone_type zone;
+	unsigned long next_pgtable;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 	int i;

-	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

 	pg_dir = swapper_pg_dir;
 	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
-	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
-	if (!next_pgtable)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, size, PAGE_SIZE);
+	next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE);

-	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

 	address = PAGE_OFFSET;
@@ -70,7 +62,8 @@ void __init paging_init(void)

 		/* now change pg_table to kernel virtual addresses */
 		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
-			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			pte_t pte = pfn_pte(virt_to_pfn((void *)address),
+					    PAGE_INIT);
 			if (address >= (unsigned long) high_memory)
 				pte_val(pte) = 0;

@@ -80,11 +73,8 @@ void __init paging_init(void)
 	}

 	current->mm = NULL;
-
-	for (zone = 0; zone < MAX_NR_ZONES; zone++)
-		zones_size[zone] = 0x0;
-	zones_size[ZONE_DMA] = num_pages;
-	free_area_init(zones_size);
+
+	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
+	free_area_init(max_zone_pfn);
 }

 int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
@@ -95,7 +85,8 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
+	int ret = -1;
 	int asid;

 	local_irq_save(flags);
@@ -104,47 +95,33 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		regs->pc + (extension_word * sizeof(long));

 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
-	if (!mm) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!mm)
+		goto out;

 	pgd = pgd_offset(mm, mmuar);
-	if (pgd_none(*pgd))  {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pgd_none(*pgd))
+		goto out;

 	p4d = p4d_offset(pgd, mmuar);
-	if (p4d_none(*p4d)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (p4d_none(*p4d))
+		goto out;

 	pud = pud_offset(p4d, mmuar);
-	if (pud_none(*pud)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pud_none(*pud))
+		goto out;

 	pmd = pmd_offset(pud, mmuar);
-	if (pmd_none(*pmd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pmd_none(*pmd))
+		goto out;

 	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
 				: pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
+		goto out;

 	if (write) {
-		if (!pte_write(*pte)) {
-			local_irq_restore(flags);
-			return -1;
-		}
+		if (!pte_write(*pte))
+			goto out;
 		set_pte(pte, pte_mkdirty(*pte));
 	}

@@ -165,9 +142,12 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
 	else
 		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
-
+	ret = 0;
+out:
+	if (pte && !KMAPAREA(mmuar))
+		pte_unmap(pte);
 	local_irq_restore(flags);
-	return 0;
+	return ret;
 }

 void __init cf_bootmem_alloc(void)
@@ -178,7 +158,8 @@ void __init cf_bootmem_alloc(void)
 	m68k_memory[0].addr = _rambase;
 	m68k_memory[0].size = _ramend - _rambase;

-	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+			  MEMBLOCK_NONE);

 	/* compute total pages in system */
 	num_pages = PFN_DOWN(_ramend - _rambase);
@@ -218,11 +199,6 @@ void __init cf_mmu_context_init(void)

 /*
  * Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP.  If they do then thicfpgalloc.hs will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
  * This isn't an LRU system, it just frees up each context in
  * turn (sort-of pseudo-random replacement :).  This would be the
  * place to implement an LRU scheme if anyone was motivated to do it.
@@ -242,3 +218,58 @@ void steal_context(void)
 	destroy_context(mm);
 }

+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE),
+	[VM_WRITE]					= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_WRITABLE),
+	[VM_WRITE | VM_READ]				= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_WRITABLE),
+	[VM_EXEC]					= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_EXEC),
+	[VM_EXEC | VM_READ]				= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_EXEC),
+	[VM_EXEC | VM_WRITE]				= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_WRITABLE |
+								   CF_PAGE_EXEC),
+	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_WRITABLE |
+								   CF_PAGE_EXEC),
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE),
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_SHARED),
+	[VM_SHARED | VM_EXEC]				= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_EXEC),
+	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_EXEC),
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_SHARED |
+								   CF_PAGE_EXEC),
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
+								   CF_PAGE_ACCESSED |
+								   CF_PAGE_READABLE |
+								   CF_PAGE_SHARED |
+								   CF_PAGE_EXEC)
+};
+DECLARE_VM_GET_PAGE_PROT
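DECLARE_VM_GET_PAGE_PROT, used here and again in motorola.c and sun3mmu.c below, expands to the generic vm_get_page_prot() helper, which simply indexes protection_map[] with the low four permission bits of vm_flags (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED). A toy standalone illustration of that lookup, with locally defined stand-ins rather than the real kernel types:

    #include <stdio.h>

    /* Toy stand-ins for the kernel's flag bits and pgprot values. */
    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_SHARED 0x8UL

    static const char *protection_map[16] = {
        "none", "readonly", "copy",   "copy",
        "readonly", "readonly", "copy",   "copy",
        "none", "readonly", "shared", "shared",
        "readonly", "readonly", "shared", "shared",
    };

    /* What DECLARE_VM_GET_PAGE_PROT boils down to: a table lookup
     * keyed on the four permission/sharing bits of vm_flags.
     */
    static const char *vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags & (VM_READ | VM_WRITE |
                                          VM_EXEC | VM_SHARED)];
    }

    int main(void)
    {
        printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* copy   */
        printf("%s\n", vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE)); /* shared */
        return 0;
    }

The "copy" vs "shared" split visible in the private rows is what implements copy-on-write for private writable mappings.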
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 227c04fe60d2..c2c03b0a1567 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -15,116 +15,11 @@
 #include <linux/gfp.h>

 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>


-/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
-   struct page instead of separately kmalloced struct.  Stolen from
-   arch/sparc/mm/srmmu.c ... */
-
-typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
-
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
-
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
-
-void __init init_pointer_table(unsigned long ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
-	}
-
-	PD_MARKBITS(dp) &= ~mask;
-	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-
-	/* unreserve the page so it's possible to free that page */
-	__ClearPageReserved(PD_PAGE(dp));
-	init_page_count(PD_PAGE(dp));
-
-	return;
-}
-
-pmd_t *get_pointer_table (void)
-{
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
-
-	/*
-	 * For a pointer table for a user process address space, a
-	 * table is taken from a page allocated for the purpose.  Each
-	 * page can hold 8 pointer tables.  The page is remapped in
-	 * virtual address space to be noncacheable.
-	 */
-	if (mask == 0) {
-		void *page;
-		ptable_desc *new;
-
-		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
-			return NULL;
-
-		flush_tlb_kernel_page(page);
-		nocache_page(page);
-
-		new = PD_PTABLE(page);
-		PD_MARKBITS(new) = 0xfe;
-		list_add_tail(new, dp);
-
-		return (pmd_t *)page;
-	}
-
-	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
-		;
-	PD_MARKBITS(dp) = mask & ~tmp;
-	if (!PD_MARKBITS(dp)) {
-		/* move to end of list */
-		list_move_tail(dp, &ptable_list);
-	}
-	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
-}
-
-int free_pointer_table (pmd_t *ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = (unsigned long)ptable & PAGE_MASK;
-	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (PD_MARKBITS (dp) & mask)
-		panic ("table already free!");
-
-	PD_MARKBITS (dp) |= mask;
-
-	if (PD_MARKBITS(dp) == 0xff) {
-		/* all tables in page are free, free page */
-		list_del(dp);
-		cache_page((void *)page);
-		free_page (page);
-		return 1;
-	} else if (ptable_list.next != dp) {
-		/*
-		 * move this descriptor to the front of the list, since
-		 * it has one or more free tables.
-		 */
-		list_move(dp, &ptable_list);
-	}
-	return 0;
-}
-
 /* invalidate page in both caches */
 static inline void clear040(unsigned long paddr)
 {
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4857985b8080..73651e093c4d 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -27,7 +27,6 @@
 #include <asm/pgalloc.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
-#include <asm/dma.h>
 #ifdef CONFIG_ATARI
 #include <asm/atari_stram.h>
 #endif
@@ -45,34 +44,219 @@ unsigned long mm_cachebits;
 EXPORT_SYMBOL(mm_cachebits);
 #endif

+/* Prior to calling these routines, the page should have been flushed
+ * from both the cache and ATC, or the CPU might not notice that the
+ * cache setting for the page has been changed. -jskov
+ */
+static inline void nocache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mknocache(*ptep);
+	}
+}
+
+static inline void cache_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	if (CPU_IS_040_OR_060) {
+		pte_t *ptep = virt_to_kpte(addr);
+
+		*ptep = pte_mkcache(*ptep);
+	}
+}
+
+/*
+ * Motorola 680x0 user's manual recommends using uncached memory for address
+ * translation tables.
+ *
+ * Seeing how the MMU can be external on (some of) these chips, that seems like
+ * a very important recommendation to follow. Provide some helpers to combat
+ * 'variation' amongst the users of this.
+ */
+
+void mmu_page_ctor(void *page)
+{
+	__flush_pages_to_ram(page, 1);
+	flush_tlb_kernel_page(page);
+	nocache_page(page);
+}
+
+void mmu_page_dtor(void *page)
+{
+	cache_page(page);
+}
+
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+   struct page instead of separately kmalloced struct.  Stolen from
+   arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+
+static struct list_head ptable_list[3] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+	LIST_HEAD_INIT(ptable_list[2]),
+};
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
+
+static const int ptable_shift[3] = {
+	7+2, /* PGD */
+	7+2, /* PMD */
+	6+2, /* PTE */
+};
+
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
+
+void __init init_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (!(PD_MARKBITS(dp) & mask)) {
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
+	}
+
+	PD_MARKBITS(dp) &= ~mask;
+	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+
+	/* unreserve the page so it's possible to free that page */
+	__ClearPageReserved(PD_PAGE(dp));
+	init_page_count(PD_PAGE(dp));
+
+	return;
+}
+
+void *get_pointer_table(int type)
+{
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
+	unsigned int tmp, off;
+
+	/*
+	 * For a pointer table for a user process address space, a
+	 * table is taken from a page allocated for the purpose.  Each
+	 * page can hold 8 pointer tables.  The page is remapped in
+	 * virtual address space to be noncacheable.
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		switch (type) {
+		case TABLE_PTE:
+			/*
+			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
+			 * SMP.
+			 */
+			pagetable_pte_ctor(virt_to_ptdesc(page));
+			break;
+		case TABLE_PMD:
+			pagetable_pmd_ctor(virt_to_ptdesc(page));
+			break;
+		case TABLE_PGD:
+			pagetable_pgd_ctor(virt_to_ptdesc(page));
+			break;
+		}
+
+		mmu_page_ctor(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = ptable_mask(type) - 1;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_move_tail(dp, &ptable_list[type]);
+	}
+	return page_address(PD_PAGE(dp)) + off;
+}
+
+int free_pointer_table(void *table, int type)
+{
+	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == ptable_mask(type)) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		mmu_page_dtor((void *)page);
+		pagetable_dtor(virt_to_ptdesc((void *)page));
+		free_page (page);
+		return 1;
+	} else if (ptable_list[type].next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_move(dp, &ptable_list[type]);
+	}
+	return 0;
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;

 extern unsigned long availmem;

+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;

-	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!ptablep)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	if (PAGE_ALIGNED(last_pte_table)) {
+		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+		}

-	clear_page(ptablep);
-	__flush_page_to_ram(ptablep);
-	flush_tlb_kernel_page(ptablep);
-	nocache_page(ptablep);
+		clear_page(pte_table);
+		mmu_page_ctor(pte_table);

-	return ptablep;
+		last_pte_table = pte_table;
+	}
+
+	last_pte_table += PTRS_PER_PTE;
+
+	return pte_table;
 }

-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;

 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;

@@ -91,33 +275,28 @@ static pmd_t * __init kernel_ptr_table(void)
 			last = pmd;
 	}

-	last_pgtable = (pmd_t *)last;
+	last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-	printk("kernel_ptr_init: %p\n", last_pgtable);
+	printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}

-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
-							   PAGE_SIZE);
-		if (!last_pgtable)
+	last_pmd_table += PTRS_PER_PMD;
+	if (PAGE_ALIGNED(last_pmd_table)) {
+		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!last_pmd_table)
 			panic("%s: Failed to allocate %lu bytes align=%lx\n",
 			      __func__, PAGE_SIZE, PAGE_SIZE);

-		clear_page(last_pgtable);
-		__flush_page_to_ram(last_pgtable);
-		flush_tlb_kernel_page(last_pgtable);
-		nocache_page(last_pgtable);
+		clear_page(last_pmd_table);
+		mmu_page_ctor(last_pmd_table);
 	}

-	return last_pgtable;
+	return last_pmd_table;
 }

 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
 	p4d_t *p4d_dir;
@@ -135,21 +314,21 @@ static void __init map_node(int node)

 	while (size > 0) {
#ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
@@ -169,24 +348,23 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pte_dir = kernel_page_table();
+				pmd_set(pmd_dir, pte_dir);
+
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG
@@ -214,12 +392,41 @@ static void __init map_node(int node)
 }

 /*
+ * Alternate definitions that are compile time constants, for
+ * initializing protection_map. The cachebits are fixed later.
+ */
+#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+
+static pgprot_t protection_map[16] __ro_after_init = {
+	[VM_NONE]					= PAGE_NONE_C,
+	[VM_READ]					= PAGE_READONLY_C,
+	[VM_WRITE]					= PAGE_COPY_C,
+	[VM_WRITE | VM_READ]				= PAGE_COPY_C,
+	[VM_EXEC]					= PAGE_READONLY_C,
+	[VM_EXEC | VM_READ]				= PAGE_READONLY_C,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY_C,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_C,
+	[VM_SHARED]					= PAGE_NONE_C,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY_C,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED_C,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_C,
+	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_C,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_C,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_C,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_C
+};
+DECLARE_VM_GET_PAGE_PROT
+
+/*
  * paging_init() continues the virtual memory environment setup which
  * was begun by the code in arch/head.S.
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 	unsigned long min_addr, max_addr;
 	unsigned long addr;
 	int i;
@@ -239,8 +446,9 @@ void __init paging_init(void)
 	}

 	min_addr = m68k_memory[0].addr;
-	max_addr = min_addr + m68k_memory[0].size;
-	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+	max_addr = min_addr + m68k_memory[0].size - 1;
+	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+			  MEMBLOCK_NONE);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -251,22 +459,23 @@ void __init paging_init(void)
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
-		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
-		addr = m68k_memory[i].addr + m68k_memory[i].size;
+		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
+				  MEMBLOCK_NONE);
+		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
 		if (addr > max_addr)
 			max_addr = addr;
 		i++;
 	}
 	m68k_memoffset = min_addr - PAGE_OFFSET;
-	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

 	module_fixup(NULL, __start_fixup, __stop_fixup);
 	flush_icache();

-	high_memory = phys_to_virt(max_addr);
+	high_memory = phys_to_virt(max_addr) + 1;

 	min_low_pfn = availmem >> PAGE_SHIFT;
-	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
+	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

 	/* Reserve kernel text/data/bss and the memory allocated in head.S */
 	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
@@ -285,28 +494,26 @@ void __init paging_init(void)

 	flush_tlb_all();

+	early_memtest(min_addr, max_addr);
+
 	/*
 	 * initialize the bad page table and bad page to point
 	 * to a couple of allocated pages
 	 */
-	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

 	/*
 	 * Set up SFC/DFC registers
 	 */
-	set_fs(KERNEL_DS);
+	set_fc(USER_DATA);

 #ifdef DEBUG
 	printk ("before free_area_init\n");
 #endif
-	for (i = 0; i < m68k_num_memory; i++) {
-		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
-		free_area_init_node(i, zones_size,
-				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+	for (i = 0; i < m68k_num_memory; i++)
 		if (node_present_pages(i))
 			node_set_state(i, N_NORMAL_MEMORY);
-	}
+
+	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
+	free_area_init(max_zone_pfn);
 }
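The three ptable_list[]/ptable_shift[] slots added in motorola.c above exist because PGD- and PMD-level pointer tables on these CPUs are 512 bytes while PTE tables are 256 bytes, so one 4 KiB page holds 8 of the former or 16 of the latter, with a free-slot bitmask kept in the page's index field. A standalone sketch of that bookkeeping (4 KiB page size assumed; the kernel's list handling is elided):

    #include <stdio.h>

    #define PAGE_SIZE 4096U

    /* Mirrors ptable_shift[] above: 512-byte PGD/PMD tables,
     * 256-byte PTE tables.
     */
    static const int ptable_shift[3] = { 7+2, 7+2, 6+2 };

    #define ptable_size(type) (1U << ptable_shift[type])
    #define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)

    int main(void)
    {
        const char *name[3] = { "PGD", "PMD", "PTE" };

        for (int type = 0; type < 3; type++)
            printf("%s: %4u bytes/table, %2u tables/page, mask %#x\n",
                   name[type], ptable_size(type),
                   PAGE_SIZE / ptable_size(type), ptable_mask(type));

        /* Allocation works like get_pointer_table(): scan for the
         * first set (free) bit in the mask and clear it; freeing a
         * table sets its bit again, and an all-set mask means the
         * whole page can be returned.
         */
        unsigned int mask = ptable_mask(2);  /* all 16 PTE slots free */
        unsigned int tmp, off;

        for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(2))
            ;
        mask &= ~tmp;
        printf("allocated PTE table at page offset %u, mask now %#x\n",
               off, mask);
        return 0;
    }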
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
index ae03555449b8..ac091892d82f 100644
--- a/arch/m68k/mm/sun3kmap.c
+++ b/arch/m68k/mm/sun3kmap.c
@@ -15,15 +15,12 @@
 #include <linux/vmalloc.h>

 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/io.h>
 #include <asm/sun3mmu.h>

-#undef SUN3_KMAP_DEBUG
+#include "../sun3/sun3.h"

-#ifdef SUN3_KMAP_DEBUG
-extern void print_pte_vaddr(unsigned long vaddr);
-#endif
+#undef SUN3_KMAP_DEBUG

 extern void mmu_emu_map_pmeg(int context, int vaddr);

diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index eca1c46bb90a..1ecf6bdd08bf 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -21,11 +21,10 @@
 #include <asm/setup.h>
 #include <linux/uaccess.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/machdep.h>
 #include <asm/io.h>

-extern void mmu_emu_init (unsigned long bootmem_end);
+#include "../sun3/sun3.h"

 const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

@@ -42,13 +41,10 @@ void __init paging_init(void)
 	unsigned long address;
 	unsigned long next_pgtable;
 	unsigned long bootmem_end;
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 	unsigned long size;

-	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;
@@ -58,10 +54,7 @@ void __init paging_init(void)

 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
-	next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
-	if (!next_pgtable)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, size, PAGE_SIZE);
+	next_pgtable = (unsigned long)memblock_alloc_or_panic(size, PAGE_SIZE);
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

 	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
@@ -76,7 +69,7 @@ void __init paging_init(void)
 		/* now change pg_table to kernel virtual addresses */
 		pg_table = (pte_t *) __va ((unsigned long) pg_table);
 		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
-			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
 			if (address >= (unsigned long)high_memory)
 				pte_val (pte) = 0;
 			set_pte (pg_table, pte);
@@ -89,14 +82,30 @@ void __init paging_init(void)

 	current->mm = NULL;

 	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
-	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+	max_zone_pfn[ZONE_DMA] = ((unsigned long)high_memory) >> PAGE_SHIFT;

 	/* I really wish I knew why the following change made things better...  -- Sam */
-/*	free_area_init(zones_size); */
-	free_area_init_node(0, zones_size,
-			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
+	free_area_init(max_zone_pfn);

 }
-
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_COPY,
+	[VM_WRITE | VM_READ]				= PAGE_COPY,
+	[VM_EXEC]					= PAGE_READONLY,
+	[VM_EXEC | VM_READ]				= PAGE_READONLY,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT