From ffe1e7861211aafe12977a3ed2f11bb6fe1e77ea Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Wed, 28 Jun 2023 17:53:06 -0400
Subject: mm/gup: cleanup next_page handling

The only path that doesn't use the generic "**pages" handling is the gate
vma.  Make it use the same path, and move the next_page label up so that it
also covers the "**pages" handling.  This prepares for THP handling of
"**pages".

Link: https://lkml.kernel.org/r/20230628215310.73782-5-peterx@redhat.com
Signed-off-by: Peter Xu
Reviewed-by: Lorenzo Stoakes
Acked-by: David Hildenbrand
Cc: Andrea Arcangeli
Cc: Hugh Dickins
Cc: James Houghton
Cc: Jason Gunthorpe
Cc: John Hubbard
Cc: Kirill A . Shutemov
Cc: Matthew Wilcox
Cc: Mike Kravetz
Cc: Mike Rapoport (IBM)
Cc: Vlastimil Babka
Cc: Yang Shi
Signed-off-by: Andrew Morton
---
 mm/gup.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/gup.c b/mm/gup.c
index 818d98b34dec..d70f8f0613f4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1207,7 +1207,7 @@ static long __get_user_pages(struct mm_struct *mm,
 		if (!vma && in_gate_area(mm, start)) {
 			ret = get_gate_page(mm, start & PAGE_MASK,
 					gup_flags, &vma,
-					pages ? &pages[i] : NULL);
+					pages ? &page : NULL);
 			if (ret)
 				goto out;
 			ctx.page_mask = 0;
@@ -1277,19 +1277,18 @@ retry:
 				ret = PTR_ERR(page);
 				goto out;
 			}
-
-			goto next_page;
 		} else if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
 		}
+next_page:
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
 			flush_dcache_page(page);
 			ctx.page_mask = 0;
 		}
-next_page:
+
 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
 		if (page_increm > nr_pages)
 			page_increm = nr_pages;
--
cgit
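
A minimal, self-contained sketch of the restructuring this patch applies.
This is not kernel code; every name below (lookup_special, lookup_generic,
collect) is made up for illustration.  The point it shows: the special-case
path fills a local variable and jumps to a label placed above the one shared
block that writes into the caller's output array, rather than writing into
the array itself.

#include <stdio.h>

/* hypothetical stand-ins for the two lookup paths */
static int lookup_special(long addr) { return (int)(addr * 2); }
static int lookup_generic(long addr) { return (int)(addr + 100); }

static long collect(long start, long nr, int *out)
{
	long i = 0;

	while (nr) {
		int page;

		if (start % 2 == 0) {
			/* "gate vma"-like special case: fill the local,
			 * then reuse the common handling below */
			page = lookup_special(start);
			goto next_page;
		}

		/* generic path */
		page = lookup_generic(start);
next_page:
		if (out)	/* single place that fills the output array */
			out[i] = page;
		i++;
		start++;
		nr--;
	}
	return i;
}

int main(void)
{
	int pages[4];
	long n = collect(10, 4, pages);

	for (long i = 0; i < n; i++)
		printf("pages[%ld] = %d\n", i, pages[i]);
	return 0;
}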