Diffstat (limited to 'arch/microblaze/mm')
-rw-r--r--  arch/microblaze/mm/Makefile      |   5
-rw-r--r--  arch/microblaze/mm/consistent.c  |  38
-rw-r--r--  arch/microblaze/mm/fault.c       |  63
-rw-r--r--  arch/microblaze/mm/highmem.c     |  89
-rw-r--r--  arch/microblaze/mm/init.c        | 151
-rw-r--r--  arch/microblaze/mm/pgtable.c     |  22
6 files changed, 78 insertions, 290 deletions
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 1b16875cea70..75edfc110d3e 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -3,7 +3,4 @@
 # Makefile
 #
 
-obj-y := consistent.o init.o
-
-obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y := consistent.o init.o pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 8c5f0c332d8b..b7ad4a98636d 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -11,7 +11,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/init.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <asm/cpuinfo.h>
 #include <asm/cacheflush.h>
 
@@ -21,39 +21,3 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 
         flush_dcache_range(paddr, paddr + size);
 }
-
-#ifndef CONFIG_MMU
-/*
- * Consistent memory allocators. Used for DMA devices that want to share
- * uncached memory with the processor core. My crufty no-MMU approach is
- * simple. In the HW platform we can optionally mirror the DDR up above the
- * processor cacheable region. So, memory accessed in this mirror region will
- * not be cached. It's alloced from the same pool as normal memory, but the
- * handle we return is shifted up into the uncached region. This will no doubt
- * cause big problems if memory allocated here is not also freed properly. -- JW
- *
- * I have to use dcache values because I can't relate on ram size:
- */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
-#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#else
-#define UNCACHED_SHADOW_MASK 0
-#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
-
-void *uncached_kernel_address(void *ptr)
-{
-        unsigned long addr = (unsigned long)ptr;
-
-        addr |= UNCACHED_SHADOW_MASK;
-        if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
-                pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
-        return (void *)addr;
-}
-
-void *cached_kernel_address(void *ptr)
-{
-        unsigned long addr = (unsigned long)ptr;
-
-        return (void *)(addr & ~UNCACHED_SHADOW_MASK);
-}
-#endif /* CONFIG_MMU */
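
The deleted no-MMU helpers above relied on simple address aliasing: ORing in UNCACHED_SHADOW_MASK lifts a kernel pointer into the uncached DDR mirror, and masking it off recovers the cached view. A minimal userspace sketch of that arithmetic follows; the base/high values are invented for the demo and stand in for cpuinfo.dcache_base/dcache_high.

/* Illustrative only: userspace sketch of the uncached-shadow aliasing.
 * The region bounds are hypothetical demo values. */
#include <stdio.h>

#define DCACHE_BASE  0x80000000UL                      /* assumed cacheable base */
#define DCACHE_HIGH  0x9fffffffUL                      /* assumed cacheable top */
#define SHADOW_MASK  (DCACHE_HIGH - DCACHE_BASE + 1)   /* 0x20000000 */

static unsigned long to_uncached(unsigned long addr)
{
        return addr | SHADOW_MASK;   /* lift into the uncached mirror */
}

static unsigned long to_cached(unsigned long addr)
{
        return addr & ~SHADOW_MASK;  /* drop back into the cached view */
}

int main(void)
{
        unsigned long buf = 0x80100000UL;

        printf("cached   %#lx\n", buf);
        printf("uncached %#lx\n", to_uncached(buf));
        printf("back     %#lx\n", to_cached(to_uncached(buf)));
        return 0;
}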
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index e6a810b0c7ad..d3c3c33b73a6 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -28,9 +28,9 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <linux/mmu_context.h>
 #include <linux/uaccess.h>
@@ -91,7 +91,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
         int code = SEGV_MAPERR;
         int is_write = error_code & ESR_S;
         vm_fault_t fault;
-        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+        unsigned int flags = FAULT_FLAG_DEFAULT;
 
         regs->ear = address;
         regs->esr = error_code;
@@ -122,10 +122,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
         if (user_mode(regs))
                 flags |= FAULT_FLAG_USER;
 
+        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
         /* When running in the kernel we expect faults to occur only to
          * addresses in user space. All other faults represent errors in the
          * kernel and should generate an OOPS. Unfortunately, in the case of an
-         * erroneous fault occurring in a code path which already holds mmap_sem
+         * erroneous fault occurring in a code path which already holds mmap_lock
          * we will deadlock attempting to validate the fault against the
          * address space. Luckily the kernel only validly references user
          * space from well defined areas of code, which are listed in the
@@ -137,12 +139,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
          * source. If this is invalid we can skip the address space check,
          * thus avoiding the deadlock.
          */
-        if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+        if (unlikely(!mmap_read_trylock(mm))) {
                 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
                         goto bad_area_nosemaphore;
 
 retry:
-                down_read(&mm->mmap_sem);
+                mmap_read_lock(mm);
         }
 
         vma = find_vma(mm, address);
@@ -190,8 +192,9 @@ retry:
                     && (kernel_mode(regs) || !store_updates_sp(regs)))
                         goto bad_area;
         }
-        if (expand_stack(vma, address))
-                goto bad_area;
+        vma = expand_stack(mm, address);
+        if (!vma)
+                goto bad_area_nosemaphore;
 
 good_area:
         code = SEGV_ACCERR;
@@ -215,9 +218,16 @@ good_area:
          * make sure we exit gracefully rather than endlessly redo
          * the fault.
          */
-        fault = handle_mm_fault(vma, address, flags);
+        fault = handle_mm_fault(vma, address, flags, regs);
+
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        bad_page_fault(regs, address, SIGBUS);
+                return;
+        }
 
-        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+        /* The fault is fully completed (including releasing mmap lock) */
+        if (fault & VM_FAULT_COMPLETED)
                 return;
 
         if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -230,26 +240,19 @@ good_area:
                 BUG();
         }
 
-        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-                if (unlikely(fault & VM_FAULT_MAJOR))
-                        current->maj_flt++;
-                else
-                        current->min_flt++;
-                if (fault & VM_FAULT_RETRY) {
-                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
-                        flags |= FAULT_FLAG_TRIED;
-
-                        /*
-                         * No need to up_read(&mm->mmap_sem) as we would
-                         * have already released it in __lock_page_or_retry
-                         * in mm/filemap.c.
-                         */
-
-                        goto retry;
-                }
+        if (fault & VM_FAULT_RETRY) {
+                flags |= FAULT_FLAG_TRIED;
+
+                /*
+                 * No need to mmap_read_unlock(mm) as we would
+                 * have already released it in __lock_page_or_retry
+                 * in mm/filemap.c.
+                 */
+
+                goto retry;
         }
 
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
 
         /*
          * keep track of tlb+htab misses that are good addrs but
@@ -260,7 +263,7 @@ good_area:
         return;
 
 bad_area:
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
         pte_errors++;
@@ -279,7 +282,7 @@ bad_area_nosemaphore:
          * us unable to handle the page fault gracefully.
          */
 out_of_memory:
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         if (!user_mode(regs))
                 bad_page_fault(regs, address, SIGKILL);
         else
@@ -287,7 +290,7 @@ out_of_memory:
         return;
 
 do_sigbus:
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         if (user_mode(regs)) {
                 force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
                 return;
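
The locking comment preserved above describes a trylock-first protocol: take mmap_lock with a trylock, and only block on it when the faulting kernel access is covered by an exception-table fixup. A rough userspace analogue of that pattern, using a pthread rwlock and an "in_fixup_region" flag standing in for search_exception_tables():

/* Userspace analogue of the trylock dance in do_page_fault(); the
 * scenario and names are invented for illustration. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

static int handle_fault(bool kernel_ctx, bool in_fixup_region)
{
        if (pthread_rwlock_tryrdlock(&map_lock) != 0) {
                /* Contended: only safe to sleep on the lock if this
                 * access is known to come from a whitelisted site. */
                if (kernel_ctx && !in_fixup_region)
                        return -1;              /* would oops in the kernel */
                pthread_rwlock_rdlock(&map_lock);
        }
        /* ... look up the VMA and service the fault here ... */
        pthread_rwlock_unlock(&map_lock);
        return 0;
}

int main(void)
{
        printf("fault handled: %d\n", handle_fault(true, true));
        return 0;
}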
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
deleted file mode 100644
index d7569f77fa15..000000000000
--- a/arch/microblaze/mm/highmem.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * PowerPC version, stolen from the i386 version.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- *                    Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- *
- * Reworked for PowerPC by various contributors. Moved from
- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-#include <asm/tlbflush.h>
-
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-
-        unsigned long vaddr;
-        int idx, type;
-
-        preempt_disable();
-        pagefault_disable();
-        if (!PageHighMem(page))
-                return page_address(page);
-
-
-        type = kmap_atomic_idx_push();
-        idx = type + KM_TYPE_NR*smp_processor_id();
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-        BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-        set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-        local_flush_tlb_page(NULL, vaddr);
-
-        return (void *) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void __kunmap_atomic(void *kvaddr)
-{
-        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-        int type;
-        unsigned int idx;
-
-        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-                pagefault_enable();
-                preempt_enable();
-                return;
-        }
-
-        type = kmap_atomic_idx();
-
-        idx = type + KM_TYPE_NR * smp_processor_id();
-#ifdef CONFIG_DEBUG_HIGHMEM
-        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-        /*
-         * force other mappings to Oops if they'll try to access
-         * this pte without first remap it
-         */
-        pte_clear(&init_mm, vaddr, kmap_pte-idx);
-        local_flush_tlb_page(NULL, vaddr);
-
-        kmap_atomic_idx_pop();
-        pagefault_enable();
-        preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
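
The deleted kmap_atomic_prot()/__kunmap_atomic() pair mapped each high page into a per-CPU fixmap slot computed as idx = type + KM_TYPE_NR * smp_processor_id(), so nested atomic kmaps on one CPU never collide with another CPU's slots. A toy sketch of that slot arithmetic, assuming hypothetical KM_TYPE_NR, page size, and fixmap-ceiling values (the real layout comes from the architecture's fixmap definitions):

/* Illustrative slot arithmetic only; all constants are demo values. */
#include <stdio.h>

#define KM_TYPE_NR   20              /* assumed slots per CPU */
#define PAGE_SIZE    4096UL
#define FIXADDR_TOP  0xfffff000UL    /* assumed fixmap ceiling */

/* Each (cpu, type) pair owns one fixed virtual page. */
static unsigned long kmap_slot_vaddr(int cpu, int type)
{
        int idx = type + KM_TYPE_NR * cpu;

        return FIXADDR_TOP - (unsigned long)(idx + 1) * PAGE_SIZE;
}

int main(void)
{
        printf("cpu0/type0 -> %#lx\n", kmap_slot_vaddr(0, 0));
        printf("cpu1/type0 -> %#lx\n", kmap_slot_vaddr(1, 0));
        return 0;
}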
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 050fc621c920..3827dc76edd8 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -7,11 +7,13 @@
  * for more details.
  */
 
+#include <linux/dma-map-ops.h>
 #include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
+#include <linux/of_fdt.h>
 #include <linux/pagemap.h>
 #include <linux/pfn.h>
 #include <linux/slab.h>
@@ -28,11 +30,6 @@
 /* Use for MMU and noMMU because of PCI generic code */
 int mem_init_done;
 
-#ifndef CONFIG_MMU
-unsigned int __page_offset;
-EXPORT_SYMBOL(__page_offset);
-#endif /* CONFIG_MMU */
-
 char *klimit = _end;
 
 /*
@@ -45,32 +42,18 @@ unsigned long memory_size;
 EXPORT_SYMBOL(memory_size);
 unsigned long lowmem_size;
 
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
-        pgd_t *pgd = pgd_offset_k(vaddr);
-        p4d_t *p4d = p4d_offset(pgd, vaddr);
-        pud_t *pud = pud_offset(p4d, vaddr);
-
-        return pte_offset_kernel(pmd_offset(pud, vaddr), vaddr);
-}
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
 
+#ifdef CONFIG_HIGHMEM
 static void __init highmem_init(void)
 {
         pr_debug("%x\n", (u32)PKMAP_BASE);
         map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
         pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
-        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-        kmap_prot = PAGE_KERNEL;
 }
 
-static void highmem_setup(void)
+static void __meminit highmem_setup(void)
 {
         unsigned long pfn;
 
@@ -90,13 +73,11 @@ static void highmem_setup(void)
 static void __init paging_init(void)
 {
         unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_MMU
         int idx;
 
         /* Setup fixmaps */
         for (idx = 0; idx < __end_of_fixed_addresses; idx++)
                 clear_fixmap(idx);
-#endif
 
         /* Clean every zones */
         memset(zones_size, 0, sizeof(zones_size));
@@ -111,45 +92,11 @@ static void __init paging_init(void)
 #endif
 
         /* We don't have holes in memory map */
-        free_area_init_nodes(zones_size);
+        free_area_init(zones_size);
 }
 
 void __init setup_memory(void)
 {
-        struct memblock_region *reg;
-
-#ifndef CONFIG_MMU
-        u32 kernel_align_start, kernel_align_size;
-
-        /* Find main memory where is the kernel */
-        for_each_memblock(memory, reg) {
-                memory_start = (u32)reg->base;
-                lowmem_size = reg->size;
-                if ((memory_start <= (u32)_text) &&
-                        ((u32)_text <= (memory_start + lowmem_size - 1))) {
-                        memory_size = lowmem_size;
-                        PAGE_OFFSET = memory_start;
-                        pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
-                                __func__, (u32) memory_start,
-                                        (u32) memory_size);
-                        break;
-                }
-        }
-
-        if (!memory_start || !memory_size) {
-                panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
-                        __func__, (u32) memory_start, (u32) memory_size);
-        }
-
-        /* reservation of region where is the kernel */
-        kernel_align_start = PAGE_DOWN((u32)_text);
-        /* ALIGN can be remove because _end in vmlinux.lds.S is align */
-        kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-        pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
-                __func__, kernel_align_start, kernel_align_start
-                        + kernel_align_size, kernel_align_size);
-        memblock_reserve(kernel_align_start, kernel_align_size);
-#endif
         /*
          * Kernel:
          * start: base phys address of kernel - page align
@@ -172,20 +119,6 @@ void __init setup_memory(void)
         pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
         pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-        /* Add active regions with valid PFNs */
-        for_each_memblock(memory, reg) {
-                unsigned long start_pfn, end_pfn;
-
-                start_pfn = memblock_region_memory_base_pfn(reg);
-                end_pfn = memblock_region_memory_end_pfn(reg);
-                memblock_set_node(start_pfn << PAGE_SHIFT,
-                                  (end_pfn - start_pfn) << PAGE_SHIFT,
-                                  &memblock.memory, 0);
-        }
-
-        /* XXX need to clip this if using highmem? */
-        sparse_memory_present_with_active_regions(0);
-
         paging_init();
 }
 
@@ -199,28 +132,9 @@ void __init mem_init(void)
         highmem_setup();
 #endif
 
-        mem_init_print_info(NULL);
-#ifdef CONFIG_MMU
-        pr_info("Kernel virtual memory layout:\n");
-        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
-        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
-                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
-        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
-                ioremap_bot, ioremap_base);
-        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
-                (unsigned long)VMALLOC_START, VMALLOC_END);
-#endif
         mem_init_done = 1;
 }
 
-#ifndef CONFIG_MMU
-int page_is_ram(unsigned long pfn)
-{
-        return __range_ok(pfn, 0);
-}
-#else
 int page_is_ram(unsigned long pfn)
 {
         return pfn < max_low_pfn;
@@ -345,34 +259,33 @@ asmlinkage void __init mmu_init(void)
         /* This will also cause that unflatten device tree will be allocated
          * inside 768MB limit */
         memblock_set_current_limit(memory_start + lowmem_size - 1);
-}
 
-/* This is only called until mem_init is done. */
-void __init *early_get_page(void)
-{
-        /*
-         * Mem start + kernel_tlb -> here is limit
-         * because of mem mapping from head.S
-         */
-        return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
-                                MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
-                                NUMA_NO_NODE);
-}
+        parse_early_param();
 
-#endif /* CONFIG_MMU */
+        early_init_fdt_scan_reserved_mem();
 
-void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-        void *p;
-
-        if (mem_init_done) {
-                p = kzalloc(size, mask);
-        } else {
-                p = memblock_alloc(size, SMP_CACHE_BYTES);
-                if (!p)
-                        panic("%s: Failed to allocate %zu bytes\n",
-                                __func__, size);
-        }
+        /* CMA initialization */
+        dma_contiguous_reserve(memory_start + lowmem_size - 1);
 
-        return p;
+        memblock_dump_all();
 }
+
+static const pgprot_t protection_map[16] = {
+        [VM_NONE]                                       = PAGE_NONE,
+        [VM_READ]                                       = PAGE_READONLY_X,
+        [VM_WRITE]                                      = PAGE_COPY,
+        [VM_WRITE | VM_READ]                            = PAGE_COPY_X,
+        [VM_EXEC]                                       = PAGE_READONLY,
+        [VM_EXEC | VM_READ]                             = PAGE_READONLY_X,
+        [VM_EXEC | VM_WRITE]                            = PAGE_COPY,
+        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_X,
+        [VM_SHARED]                                     = PAGE_NONE,
+        [VM_SHARED | VM_READ]                           = PAGE_READONLY_X,
+        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
+        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED_X,
+        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY,
+        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY_X,
+        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED,
+        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
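
The new protection_map table plus DECLARE_VM_GET_PAGE_PROT make vm_get_page_prot() a lookup keyed by the low four VM flag bits. A self-contained sketch of that lookup, using the kernel's actual flag values but plain strings in place of pgprot entries:

/* Demo of the table lookup the init.c hunk adds; strings replace the
 * real pgprot values for illustration. */
#include <stdio.h>

#define VM_NONE   0x0
#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

static const char *protection_map[16] = {
        [VM_NONE]                                  = "PAGE_NONE",
        [VM_READ]                                  = "PAGE_READONLY_X",
        [VM_WRITE]                                 = "PAGE_COPY",
        [VM_WRITE | VM_READ]                       = "PAGE_COPY_X",
        [VM_EXEC]                                  = "PAGE_READONLY",
        [VM_EXEC | VM_READ]                        = "PAGE_READONLY_X",
        [VM_EXEC | VM_WRITE]                       = "PAGE_COPY",
        [VM_EXEC | VM_WRITE | VM_READ]             = "PAGE_COPY_X",
        [VM_SHARED]                                = "PAGE_NONE",
        [VM_SHARED | VM_READ]                      = "PAGE_READONLY_X",
        [VM_SHARED | VM_WRITE]                     = "PAGE_SHARED",
        [VM_SHARED | VM_WRITE | VM_READ]           = "PAGE_SHARED_X",
        [VM_SHARED | VM_EXEC]                      = "PAGE_READONLY",
        [VM_SHARED | VM_EXEC | VM_READ]            = "PAGE_READONLY_X",
        [VM_SHARED | VM_EXEC | VM_WRITE]           = "PAGE_SHARED",
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "PAGE_SHARED_X",
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags & 0xf];   /* low four flag bits */
}

int main(void)
{
        printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* PAGE_COPY_X */
        printf("%s\n", vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE)); /* PAGE_SHARED_X */
        return 0;
}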
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 68c26cacd930..9f73265aad4e 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -32,8 +32,10 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
+#include <linux/memblock.h>
+#include <linux/kallsyms.h>
 
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <linux/io.h>
 #include <asm/mmu.h>
@@ -170,7 +172,7 @@ void __init mapin_ram(void)
         for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
                 f = _PAGE_PRESENT | _PAGE_ACCESSED |
                                 _PAGE_SHARED | _PAGE_HWEXEC;
-                if ((char *) v < _stext || (char *) v >= _etext)
+                if (!is_kernel_text(v))
                         f |= _PAGE_WRENABLE;
                 else
                         /* On the MicroBlaze, no user access
@@ -242,15 +244,13 @@ unsigned long iopa(unsigned long addr)
 
 __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-        pte_t *pte;
-        if (mem_init_done) {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-        } else {
-                pte = (pte_t *)early_get_page();
-                if (pte)
-                        clear_page(pte);
-        }
-        return pte;
+        if (mem_init_done)
+                return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+        else
+                return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+                                              MEMBLOCK_LOW_LIMIT,
+                                              memory_start + kernel_tlb,
+                                              NUMA_NO_NODE);
 }
 
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
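
pte_alloc_one_kernel() now calls memblock_alloc_try_nid() directly, bounding early PTE pages to the region head.S has already mapped (up to memory_start + kernel_tlb) instead of going through the removed early_get_page(). A toy bump allocator illustrating the bounded, zeroing allocation pattern; the arena and its size are invented for the demo:

/* Illustrative only: bounded early page allocation in miniature. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE   4096u
#define ARENA_PAGES 4            /* stands in for the early mapping limit */

static uint8_t arena[ARENA_PAGES * PAGE_SIZE] __attribute__((aligned(4096)));
static size_t next_page;

static void *early_page_alloc(void)
{
        if (next_page >= ARENA_PAGES)
                return NULL;             /* beyond the early-mapped window */
        void *p = &arena[next_page++ * PAGE_SIZE];
        memset(p, 0, PAGE_SIZE);         /* memblock_alloc_try_nid zeroes too */
        return p;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("page %d -> %p\n", i, early_page_alloc());
        return 0;
}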