Diffstat (limited to 'arch/csky/mm')
-rw-r--r--  arch/csky/mm/asid.c         |  5
-rw-r--r--  arch/csky/mm/dma-mapping.c  |  1
-rw-r--r--  arch/csky/mm/fault.c        | 30
-rw-r--r--  arch/csky/mm/init.c         | 87
4 files changed, 33 insertions, 90 deletions
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
index b2e914745c1d..7fb6c417bbac 100644
--- a/arch/csky/mm/asid.c
+++ b/arch/csky/mm/asid.c
@@ -27,7 +27,7 @@ static void flush_context(struct asid_info *info)
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+	bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -178,8 +178,7 @@ int asid_allocator_init(struct asid_info *info,
 	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
 	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
-	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
-			    sizeof(*info->map), GFP_KERNEL);
+	info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
 	if (!info->map)
 		return -ENOMEM;
 
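The asid.c hunks are a straight conversion to the kernel's bitmap helpers: bitmap_zero() clears the whole map without the explicit 0 offset, and bitmap_zalloc() replaces the open-coded kcalloc() of BITS_TO_LONGS() longs. A minimal sketch of the equivalence (the alloc_asid_map() wrapper is illustrative, not code from the tree):

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* Both forms return a zeroed bitmap able to hold 'nbits' bits. */
    static unsigned long *alloc_asid_map(unsigned int nbits)
    {
    	/* Open-coded form removed by this patch:
    	 *   return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
    	 *                  GFP_KERNEL);
    	 */
    	return bitmap_zalloc(nbits, GFP_KERNEL);
    }

A bitmap obtained from bitmap_zalloc() should eventually be released with bitmap_free(), which is kfree() underneath.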
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index c3a775a7e8f9..82447029feb4 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -9,7 +9,6 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <asm/cache.h>
 
 static inline void cache_op(phys_addr_t paddr, size_t size,
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 466ad949818a..a6ca7dff4215 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -67,7 +67,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
 	pr_alert("Unable to handle kernel paging request at virtual "
 		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
 	die(regs, "Oops");
-	do_exit(SIGKILL);
+	make_task_dead(SIGKILL);
 }
 
 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
@@ -259,11 +247,11 @@ retry:
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
@@ -285,7 +273,11 @@ good_area:
 		return;
 	}
 
-	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 		flags |= FAULT_FLAG_TRIED;
 
 		/*
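Taken together, the fault.c hunks do three things. First, do_exit(SIGKILL) becomes make_task_dead(SIGKILL), the dedicated never-returns exit path for a task killed by a kernel oops. Second, the open-coded mmap_read_lock()/find_vma()/expand_stack() sequence is replaced by the generic lock_mm_and_find_vma() helper, which performs the lookup and any VM_GROWSDOWN stack expansion itself and returns NULL with the mmap lock already dropped on failure; that is why bad_area(), which used to unlock internally, becomes bad_area_nosemaphore() and no longer touches the lock. Third, the handler learns about VM_FAULT_COMPLETED, where the core mm finishes the fault and releases the lock itself; the explicit FAULT_FLAG_ALLOW_RETRY test can go because handle_mm_fault() only returns VM_FAULT_RETRY when retry is permitted. A condensed sketch of the resulting flow, paraphrased from the hunks above rather than copied from the tree:

    retry:
    	vma = lock_mm_and_find_vma(mm, addr, regs);
    	if (unlikely(!vma)) {
    		/* Lookup failed: the helper already dropped the mmap lock. */
    		bad_area_nosemaphore(regs, mm, code, addr);
    		return;
    	}

    	code = SEGV_ACCERR;
    	if (unlikely(access_error(regs, vma))) {
    		/* Lookup succeeded, so the lock is held and must be dropped. */
    		mmap_read_unlock(mm);
    		bad_area_nosemaphore(regs, mm, code, addr);
    		return;
    	}

    	fault = handle_mm_fault(vma, addr, flags, regs);

    	/* Core mm completed the fault and released the mmap lock itself. */
    	if (fault & VM_FAULT_COMPLETED)
    		return;

    	if (unlikely(fault & VM_FAULT_RETRY)) {
    		flags |= FAULT_FLAG_TRIED;
    		goto retry;
    	}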
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bf2004aa811a..573da66b2543 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -42,73 +42,6 @@
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 						__page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
-	unsigned long size;
-
-	if (initrd_start >= initrd_end) {
-		pr_err("initrd not found or empty");
-		goto disable;
-	}
-
-	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
-		pr_err("initrd extends beyond end of memory");
-		goto disable;
-	}
-
-	size = initrd_end - initrd_start;
-
-	if (memblock_is_region_reserved(__pa(initrd_start), size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
-		       __pa(initrd_start), size);
-		goto disable;
-	}
-
-	memblock_reserve(__pa(initrd_start), size);
-
-	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
-		(void *)(initrd_start), size);
-
-	initrd_below_start_ok = 1;
-
-	return;
-
-disable:
-	initrd_start = initrd_end = 0;
-
-	pr_err(" - disabling initrd\n");
-}
-#endif
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
-#else
-	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
-#endif
-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	setup_initrd();
-#endif
-
-	memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
-	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-		struct page *page = pfn_to_page(tmp);
-
-		/* FIXME not sure about */
-		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
-			free_highmem_page(page);
-	}
-#endif
-}
-
 void free_initmem(void)
 {
 	free_initmem_default(-1);
@@ -197,3 +130,23 @@ void __init fixaddr_init(void)
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
 }
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READ,
+	[VM_WRITE]					= PAGE_READ,
+	[VM_WRITE | VM_READ]				= PAGE_READ,
+	[VM_EXEC]					= PAGE_READ,
+	[VM_EXEC | VM_READ]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE]				= PAGE_READ,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READ,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READ,
+	[VM_SHARED | VM_WRITE]				= PAGE_WRITE,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC]				= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_WRITE,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_WRITE
+};
+DECLARE_VM_GET_PAGE_PROT
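The new protection_map[] table plus DECLARE_VM_GET_PAGE_PROT replaces the old writable global protection_map in mm/mmap.c that architectures used to customize at boot. The macro, defined in <linux/pgtable.h>, expands to roughly the following, so vm_get_page_prot() is just a table lookup on the four low protection bits of vm_flags:

    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
    	return protection_map[vm_flags &
    			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
    EXPORT_SYMBOL(vm_get_page_prot);

Note that every private (non-VM_SHARED) writable combination maps to PAGE_READ: like most MMUs without a true write-only mode, csky makes a private write access fault on a read-only PTE first, and the write is then satisfied by copy-on-write.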