Diffstat (limited to 'arch/csky/mm')
-rw-r--r--  arch/csky/mm/fault.c  24
-rw-r--r--  arch/csky/mm/init.c   67
2 files changed, 6 insertions, 85 deletions
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index e15f736cca4b..a6ca7dff4215 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
@@ -259,11 +247,11 @@ retry:
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
@@ -289,7 +277,7 @@ good_area:
 	if (fault & VM_FAULT_COMPLETED)
 		return;
 
-	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 		flags |= FAULT_FLAG_TRIED;
 
 		/*
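
Note on the fault.c change: lock_mm_and_find_vma() takes the mmap read lock, looks up the VMA and performs any needed stack expansion itself, and it drops the lock again before returning NULL on failure. That is why the old bad_area() helper (which unlocked) becomes bad_area_nosemaphore(), and why the access_error() path now unlocks explicitly before reporting the fault. A minimal sketch of the resulting retry path, assembled from the hunks above (the surrounding handle_mm_fault() handling is omitted):

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/* lock_mm_and_find_vma() has already dropped the mmap lock */
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	code = SEGV_ACCERR;
	if (unlikely(access_error(regs, vma))) {
		/* still holding the mmap read lock here, so drop it first */
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}
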
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bde7cabd23df..573da66b2543 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -42,73 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 	__page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
-	unsigned long size;
-
-	if (initrd_start >= initrd_end) {
-		pr_err("initrd not found or empty");
-		goto disable;
-	}
-
-	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
-		pr_err("initrd extends beyond end of memory");
-		goto disable;
-	}
-
-	size = initrd_end - initrd_start;
-
-	if (memblock_is_region_reserved(__pa(initrd_start), size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
-		       __pa(initrd_start), size);
-		goto disable;
-	}
-
-	memblock_reserve(__pa(initrd_start), size);
-
-	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
-		(void *)(initrd_start), size);
-
-	initrd_below_start_ok = 1;
-
-	return;
-
-disable:
-	initrd_start = initrd_end = 0;
-
-	pr_err(" - disabling initrd\n");
-}
-#endif
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
-#else
-	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
-#endif
-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	setup_initrd();
-#endif
-
-	memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
-	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
-		struct page *page = pfn_to_page(tmp);
-
-		/* FIXME not sure about */
-		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
-			free_highmem_page(page);
-	}
-#endif
-}
-
 void free_initmem(void)
 {
 	free_initmem_default(-1);
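
Note on the init.c change: the hunk above removes csky's private setup_initrd() and mem_init() implementations (initrd reservation, memblock_free_all() and the highmem freeing loop), presumably because equivalent generic code now covers them; the replacement itself is outside this diff. What survives of the init entry points in the context lines shown here is minimal (the rest of the file is not part of this diff):

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

void free_initmem(void)
{
	free_initmem_default(-1);
}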