Diffstat (limited to 'arch/sparc/mm')
-rw-r--r-- | arch/sparc/mm/init_32.c | 31
-rw-r--r-- | arch/sparc/mm/init_64.c | 33
-rw-r--r-- | arch/sparc/mm/srmmu.c   |  2
-rw-r--r-- | arch/sparc/mm/tlb.c     |  5
4 files changed, 20 insertions, 51 deletions
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d96a14ffceeb..fdc93dd12c3e 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -232,19 +232,7 @@ static void __init taint_real_pages(void)
 	}
 }
 
-static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long tmp;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
-#endif
-
-	for (tmp = start_pfn; tmp < end_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
 {
 	int i;
 
@@ -274,23 +262,6 @@ void __init mem_init(void)
 	memset(sparc_valid_addr_bitmap, 0, i << 2);
 
 	taint_real_pages();
-
-	max_mapnr = last_valid_pfn - pfn_base;
-	high_memory = __va(max_low_pfn << PAGE_SHIFT);
-	memblock_free_all();
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
-		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-
-		if (end_pfn <= highstart_pfn)
-			continue;
-
-		if (start_pfn < highstart_pfn)
-			start_pfn = highstart_pfn;
-
-		map_high_region(start_pfn, end_pfn);
-	}
 }
 
 void sparc_flush_page_to_ram(struct page *page)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 05882bca5b73..25ae4c897aae 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2505,10 +2505,6 @@ static void __init register_page_bootmem_info(void)
 }
 void __init mem_init(void)
 {
-	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
-	memblock_free_all();
-
 	/*
 	 * Must be done after boot memory is put on freelist, because here we
 	 * might set fields in deferred struct pages that have not yet been
@@ -2882,33 +2878,27 @@ void __flush_tlb_all(void)
 			     : : "r" (pstate));
 }
 
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	pte_t *pte = NULL;
-
-	if (page)
-		pte = (pte_t *) page_address(page);
-
-	return pte;
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
+static pte_t *__pte_alloc_one(struct mm_struct *mm)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
 
 	if (!ptdesc)
 		return NULL;
-	if (!pagetable_pte_ctor(ptdesc)) {
+	if (!pagetable_pte_ctor(mm, ptdesc)) {
 		pagetable_free(ptdesc);
 		return NULL;
 	}
 	return ptdesc_address(ptdesc);
 }
 
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	return __pte_alloc_one(mm);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	free_page((unsigned long)pte);
+	return __pte_alloc_one(mm);
 }
 
 static void __pte_free(pgtable_t pte)
@@ -2919,6 +2909,11 @@ static void __pte_free(pgtable_t pte)
 	pagetable_free(ptdesc);
 }
 
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	__pte_free(pte);
+}
+
 void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	__pte_free(pte);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index dd32711022f5..f8fb4911d360 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -350,7 +350,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
 	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
 	spin_lock(&mm->page_table_lock);
 	if (page_ref_inc_return(page) == 2 &&
-	    !pagetable_pte_ctor(page_ptdesc(page))) {
+	    !pagetable_pte_ctor(mm, page_ptdesc(page))) {
 		page_ref_dec(page);
 		ptep = NULL;
 	}
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 8648a50afe88..a35ddcca5e76 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,8 +52,10 @@ out:
 
 void arch_enter_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+	struct tlb_batch *tb;
 
+	preempt_disable();
+	tb = this_cpu_ptr(&tlb_batch);
 	tb->active = 1;
 }
 
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
 	if (tb->tlb_nr)
 		flush_tlb_pending();
 	tb->active = 0;
+	preempt_enable();
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,