author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2017-03-09 17:24:07 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-09 11:48:47 -0800
commit		c2febafc67734a62196c1b9dfba926412d4077ba (patch)
tree		a61b7cd503e4c6d2fbb58f0cb53662ffd60b4c14 /mm/page_vma_mapped.c
parent		048456dcf2c56ad6f6248e2899dda92fb6a613f6 (diff)
mm: convert generic code to 5-level paging
Convert all non-architecture-specific code to 5-level paging. The change is mostly mechanical: handle one more page table level in the places where we deal with pud_t.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
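For reference, a minimal sketch of the 5-level walk pattern this conversion moves generic code to: a p4d_offset()/p4d_present() step is inserted between the pgd and pud lookups, and pud_offset() now takes a p4d_t * instead of a pgd_t *. The helper name walk_to_pte() is hypothetical and not part of the patch; the accessors are the standard kernel ones. On architectures with fewer than five paging levels the p4d level is folded away, so the extra lookup compiles down to essentially nothing.

#include <linux/mm.h>

/*
 * Hypothetical helper, not part of this patch: walk the page tables
 * for one address using the 5-level layout. Only the p4d step is new;
 * the rest is the pre-existing pgd -> pud -> pmd -> pte pattern.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);		/* new level */
	if (!p4d_present(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);		/* now takes a p4d_t * */
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() */
}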
Diffstat (limited to 'mm/page_vma_mapped.c')
-rw-r--r--	mm/page_vma_mapped.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index a23001a22c15..c4c9def8ffea 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	struct mm_struct *mm = pvmw->vma->vm_mm;
 	struct page *page = pvmw->page;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 
 	/* The only possible pmd mapping has been handled on last iteration */
@@ -133,7 +134,10 @@ restart:
 	pgd = pgd_offset(mm, pvmw->address);
 	if (!pgd_present(*pgd))
 		return false;
-	pud = pud_offset(pgd, pvmw->address);
+	p4d = p4d_offset(pgd, pvmw->address);
+	if (!p4d_present(*p4d))
+		return false;
+	pud = pud_offset(p4d, pvmw->address);
 	if (!pud_present(*pud))
 		return false;
 	pvmw->pmd = pmd_offset(pud, pvmw->address);