From 75f360696ce9d8ec8b253452b23b3e24c0689b4b Mon Sep 17 00:00:00 2001
From: Song Liu
Date: Sat, 30 Nov 2019 17:57:19 -0800
Subject: mm/thp: flush file for !is_shmem PageDirty() case in collapse_file()

For non-shmem file THPs, khugepaged only collapses read-only .text
mappings (VM_DENYWRITE). These pages should not be dirty except in the
case where the file hasn't been flushed since the first write.

Call filemap_flush() in collapse_file() to accelerate the writeback in
such cases.

Link: http://lkml.kernel.org/r/20191106060930.2571389-3-songliubraving@fb.com
Signed-off-by: Song Liu
Cc: Kirill A. Shutemov
Cc: Hugh Dickins
Cc: William Kucharski
Cc: Johannes Weiner
Cc: Matthew Wilcox
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/khugepaged.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a8a57bebb5fa..b679908743cb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1602,6 +1602,24 @@ static void collapse_file(struct mm_struct *mm,
 					result = SCAN_FAIL;
 					goto xa_unlocked;
 				}
+			} else if (PageDirty(page)) {
+				/*
+				 * khugepaged only works on read-only fd,
+				 * so this page is dirty because it hasn't
+				 * been flushed since first write. There
+				 * won't be new dirty pages.
+				 *
+				 * Trigger async flush here and hope the
+				 * writeback is done when khugepaged
+				 * revisits this page.
+				 *
+				 * This is a one-off situation. We are not
+				 * forcing writeback in loop.
+				 */
+				xas_unlock_irq(&xas);
+				filemap_flush(mapping);
+				result = SCAN_FAIL;
+				goto xa_unlocked;
 			} else if (trylock_page(page)) {
 				get_page(page);
 				xas_unlock_irq(&xas);
--
cgit
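
For context on the new branch: filemap_flush() is the asynchronous variant of filemap_fdatawrite() (it uses WB_SYNC_NONE), so it only queues writeback for the mapping and returns without waiting; collapse_file() then gives up with SCAN_FAIL and khugepaged can retry the collapse on a later scan, by which time the page is expected to be clean. The snippet below is only an illustrative sketch of that call pattern; kick_async_flush_example() is a hypothetical helper, not part of the patch or of the kernel tree.

#include <linux/fs.h>

/*
 * Illustrative sketch only (kick_async_flush_example() is hypothetical).
 * filemap_flush() starts non-blocking (WB_SYNC_NONE) writeback for the
 * whole mapping and returns immediately, which is why the patch can bail
 * out with SCAN_FAIL and let a later khugepaged pass find clean pages.
 */
static int kick_async_flush_example(struct address_space *mapping)
{
	return filemap_flush(mapping);
}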