From 3bb977946998ae0d756279c5a108435d04636e2b Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Thu, 3 Apr 2014 14:48:16 -0700
Subject: mm: consolidate code to setup pte

Extract and consolidate code to setup pte from do_read_fault(),
do_cow_fault() and do_shared_fault().

Signed-off-by: Kirill A. Shutemov
Cc: Mel Gorman
Cc: Rik van Riel
Cc: Andi Kleen
Cc: Matthew Wilcox
Cc: Dave Hansen
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 66 ++++++++++++++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index ac3990d77ec9..90cea22001ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3318,13 +3318,37 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+static void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+		struct page *page, pte_t *pte, bool write, bool anon)
+{
+	pte_t entry;
+
+	flush_icache_page(vma, page);
+	entry = mk_pte(page, vma->vm_page_prot);
+	if (write)
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
+		pte_mksoft_dirty(entry);
+	if (anon) {
+		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+		page_add_new_anon_rmap(page, vma, address);
+	} else {
+		inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+		page_add_file_rmap(page);
+	}
+	set_pte_at(vma->vm_mm, address, pte, entry);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache(vma, address, pte);
+}
+
 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int ret;
 
 	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
@@ -3338,20 +3362,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		return ret;
 	}
-
-	flush_icache_page(vma, fault_page);
-	entry = mk_pte(fault_page, vma->vm_page_prot);
-	if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
-		pte_mksoft_dirty(entry);
-	inc_mm_counter_fast(mm, MM_FILEPAGES);
-	page_add_file_rmap(fault_page);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	pte_unmap_unlock(pte, ptl);
 	unlock_page(fault_page);
-
 	return ret;
 }
 
@@ -3361,7 +3374,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *fault_page, *new_page;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
@@ -3390,17 +3403,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		goto uncharge_out;
 	}
-
-	flush_icache_page(vma, new_page);
-	entry = mk_pte(new_page, vma->vm_page_prot);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	inc_mm_counter_fast(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(new_page, vma, address);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
-
+	do_set_pte(vma, address, new_page, pte, true, true);
 	pte_unmap_unlock(pte, ptl);
 	unlock_page(fault_page);
 	page_cache_release(fault_page);
@@ -3418,7 +3421,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *fault_page;
 	struct address_space *mapping;
 	spinlock_t *ptl;
-	pte_t entry, *pte;
+	pte_t *pte;
 	int dirtied = 0;
 	int ret, tmp;
 
@@ -3447,16 +3450,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		page_cache_release(fault_page);
 		return ret;
 	}
-
-	flush_icache_page(vma, fault_page);
-	entry = mk_pte(fault_page, vma->vm_page_prot);
-	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	inc_mm_counter_fast(mm, MM_FILEPAGES);
-	page_add_file_rmap(fault_page);
-	set_pte_at(mm, address, pte, entry);
-
-	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, address, pte);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
--
cgit
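
For quick reference, do_set_pte()'s behavior is selected entirely by its two
boolean parameters; the three call sites from the hunks above reduce to the
following (the comments summarize each fault path as visible in the diff):

	/* do_read_fault(): map a file page read-only */
	do_set_pte(vma, address, fault_page, pte, false, false);

	/* do_cow_fault(): map the private copy writable and anonymous */
	do_set_pte(vma, address, new_page, pte, true, true);

	/* do_shared_fault(): write fault on a shared mapping,
	 * so writable but still file-backed */
	do_set_pte(vma, address, fault_page, pte, true, false);

The write flag decides whether the new pte is made writable and dirty via
maybe_mkwrite(pte_mkdirty()), while anon selects between the
MM_ANONPAGES/page_add_new_anon_rmap() and MM_FILEPAGES/page_add_file_rmap()
accounting paths; the soft-dirty carry-over from a pte_file() entry applies
only on the read-only path.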