Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  74
 1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e84a10b0d310..cbd977b1d60d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,16 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return __transparent_hugepage_enabled(vma);
+ if (vma_is_shmem(vma) && shmem_huge_enabled(vma))
+ return __transparent_hugepage_enabled(vma);
+
+ return false;
+}
+
static struct page *get_huge_zero_page(void)
{
struct page *zero_page;
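The first hunk adds transparent_hugepage_enabled() as the VMA-type-aware eligibility check (anonymous VMAs, or shmem VMAs with huge pages enabled), leaving __transparent_hugepage_enabled() as the raw flags/sysfs test. A minimal caller sketch, assuming a hypothetical seq_file show handler (only transparent_hugepage_enabled() is from this patch; the handler and its names are illustrative):

/*
 * Hedged sketch: a hypothetical caller reporting whether a VMA is
 * eligible for THP from a procfs-style show handler.
 */
static void show_thp_eligibility(struct seq_file *m, struct vm_area_struct *vma)
{
	seq_printf(m, "THPeligible:    %d\n",
		   transparent_hugepage_enabled(vma));
}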
@@ -420,7 +430,7 @@ static int __init hugepage_init(void)
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
- if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
+ if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
transparent_hugepage_flags = 0;
return 0;
}
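The totalram_pages() hunk only swaps a direct read of the global for its new accessor; the threshold itself is unchanged: 512 << (20 - PAGE_SHIFT) is the page count of 512MiB. A standalone sketch of the arithmetic, assuming 4KiB pages (PAGE_SHIFT == 12), written outside the kernel for clarity:

#include <stdio.h>

/* 512 << (20 - 12) == 512 * 256 == 131072 pages == 512MiB. */
int main(void)
{
	unsigned long page_shift = 12;	/* 4KiB pages */
	unsigned long min_pages = 512UL << (20 - page_shift);

	printf("%lu pages (%lu MiB)\n", min_pages,
	       min_pages >> (20 - page_shift));
	return 0;
}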
@@ -1134,8 +1144,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
int i;
vm_fault_t ret = 0;
struct page **pages;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ struct mmu_notifier_range range;
pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
GFP_KERNEL);
@@ -1173,9 +1182,9 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
cond_resched();
}
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+ mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+ haddr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
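Each converted site now packs the (mm, start, end) triple into one struct mmu_notifier_range, initialized once and passed by pointer. A sketch of the shape this API has at this point in the series (an approximation: the real struct carries additional state, and later kernels add fields such as an event type and the vma):

struct mm_struct;

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
}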
@@ -1220,8 +1229,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pmdp_huge_clear_flush_notify() did already call it.
*/
- mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
- mmun_end);
+ mmu_notifier_invalidate_range_only_end(&range);
ret |= VM_FAULT_WRITE;
put_page(page);
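As the comment above notes, pmdp_huge_clear_flush_notify() already invoked the ->invalidate_range() callbacks under the page-table lock, so the exit path uses the _only_end() variant to avoid calling them twice. A hedged sketch of the distinction (not the kernel's exact code, which routes both variants through one internal helper with an only_end flag):

/* Full end: replay ->invalidate_range(), then run ->invalidate_range_end(). */
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	__mmu_notifier_invalidate_range(range->mm, range->start, range->end);
	__mmu_notifier_invalidate_range_end(range);
}

/* Only-end: secondary TLBs were already invalidated under the lock;
 * just run the ->invalidate_range_end() callbacks. */
void mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	__mmu_notifier_invalidate_range_end(range);
}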
@@ -1231,7 +1239,7 @@ out:
out_free_pages:
spin_unlock(vmf->ptl);
- mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range_end(&range);
for (i = 0; i < HPAGE_PMD_NR; i++) {
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
@@ -1248,8 +1256,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
struct page *page = NULL, *new_page;
struct mem_cgroup *memcg;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
+ struct mmu_notifier_range range;
gfp_t huge_gfp; /* for allocation and charge */
vm_fault_t ret = 0;
@@ -1293,7 +1300,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
get_page(page);
spin_unlock(vmf->ptl);
alloc:
- if (transparent_hugepage_enabled(vma) &&
+ if (__transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
huge_gfp = alloc_hugepage_direct_gfpmask(vma);
new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
@@ -1338,9 +1345,9 @@ alloc:
vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
- mmun_start = haddr;
- mmun_end = haddr + HPAGE_PMD_SIZE;
- mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+ mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+ haddr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
spin_lock(vmf->ptl);
if (page)
@@ -1375,8 +1382,7 @@ out_mn:
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pmdp_huge_clear_flush_notify() did already call it.
*/
- mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
- mmun_end);
+ mmu_notifier_invalidate_range_only_end(&range);
out:
return ret;
out_unlock:
@@ -1490,8 +1496,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
- wait_on_page_locked(page);
- put_page(page);
+ put_and_wait_on_page_locked(page);
goto out;
}
@@ -1527,8 +1532,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
- wait_on_page_locked(page);
- put_page(page);
+ put_and_wait_on_page_locked(page);
goto out;
}
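Both NUMA-fault hunks above replace the wait-then-put sequence with put_and_wait_on_page_locked(), a helper introduced earlier in this series. The old order held the extra page reference across the entire wait, and for a page locked by migration that reference is exactly what keeps migration from completing and dropping the lock. Roughly what the helper does, approximated from the companion patch (the DROP behavior asks the wait-bit machinery to drop the caller's reference only after the waiter is queued, so there is no use-after-free window):

void put_and_wait_on_page_locked(struct page *page)
{
	wait_queue_head_t *q;

	page = compound_head(page);
	q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
				DROP);
}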
@@ -2017,14 +2021,15 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address)
{
spinlock_t *ptl;
- struct mm_struct *mm = vma->vm_mm;
- unsigned long haddr = address & HPAGE_PUD_MASK;
+ struct mmu_notifier_range range;
- mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
- ptl = pud_lock(mm, pud);
+ mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
+ (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+ ptl = pud_lock(vma->vm_mm, pud);
if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
goto out;
- __split_huge_pud_locked(vma, pud, haddr);
+ __split_huge_pud_locked(vma, pud, range.start);
out:
spin_unlock(ptl);
@@ -2032,8 +2037,7 @@ out:
* No need to double call mmu_notifier->invalidate_range() callback as
* the above pudp_huge_clear_flush_notify() did already call it.
*/
- mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
- HPAGE_PUD_SIZE);
+ mmu_notifier_invalidate_range_only_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
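In both split paths (the PUD hunk above and the PMD hunk that follows), the local haddr disappears: after mmu_notifier_range_init(&range, ..., address & HPAGE_PUD_MASK, ...), range.start is exactly the old haddr, so __split_huge_pud_locked() can take range.start directly. A standalone sketch of that mask arithmetic, assuming 1GiB PUD-sized pages (HPAGE_PUD_SHIFT == 30, as on x86-64), outside the kernel for clarity:

#include <stdio.h>

/* range.start == address & HPAGE_PUD_MASK == the old haddr. */
int main(void)
{
	unsigned long hpage_pud_size = 1UL << 30;	/* 1GiB */
	unsigned long hpage_pud_mask = ~(hpage_pud_size - 1);
	unsigned long address = 0x7f5a40001234UL;
	unsigned long start = address & hpage_pud_mask;

	printf("start=%#lx end=%#lx\n", start, start + hpage_pud_size);
	return 0;
}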
@@ -2235,11 +2239,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct page *page)
{
spinlock_t *ptl;
- struct mm_struct *mm = vma->vm_mm;
- unsigned long haddr = address & HPAGE_PMD_MASK;
+ struct mmu_notifier_range range;
- mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
- ptl = pmd_lock(mm, pmd);
+ mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
+ (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+ ptl = pmd_lock(vma->vm_mm, pmd);
/*
	 * If the caller asks to set up migration entries, we need a page to check
@@ -2255,7 +2260,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
clear_page_mlock(page);
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
goto out;
- __split_huge_pmd_locked(vma, pmd, haddr, freeze);
+ __split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
/*
@@ -2271,8 +2276,7 @@ out:
* any further changes to individual pte will notify. So no need
* to call mmu_notifier->invalidate_range()
*/
- mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
- HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_only_end(&range);
}
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,