From 2033c98cce666b0d125ae956613ab5111bb8d202 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 8 Nov 2023 18:28:09 +0000
Subject: mm: remove invalidate_inode_page()

All callers are now converted to call mapping_evict_folio().

Link: https://lkml.kernel.org/r/20231108182809.602073-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
---
 mm/truncate.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

(limited to 'mm/truncate.c')

diff --git a/mm/truncate.c b/mm/truncate.c
index 1d516e51e29d..52e3a703e7b2 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -294,13 +294,6 @@ long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
 	return remove_mapping(mapping, folio);
 }
 
-long invalidate_inode_page(struct page *page)
-{
-	struct folio *folio = page_folio(page);
-
-	return mapping_evict_folio(folio_mapping(folio), folio);
-}
-
 /**
  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -559,9 +552,9 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*
- * This is like invalidate_inode_page(), except it ignores the page's
+ * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount. We do this because invalidate_inode_pages2() needs stronger
- * invalidation guarantees, and cannot afford to leave pages behind because
+ * invalidation guarantees, and cannot afford to leave folios behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches. */
-- cgit
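
As a reference for the conversion the commit message describes, below is a
minimal sketch of a converted caller. It is illustrative only and not part of
the patch: the helper name page_evict_example() is hypothetical, and the
assumption is that the caller lives under mm/ where the mapping_evict_folio()
declaration is visible via mm/internal.h.

	#include <linux/mm.h>		/* page_folio() */
	#include <linux/pagemap.h>	/* folio_mapping() */
	#include "internal.h"		/* mapping_evict_folio() */

	/*
	 * Hypothetical caller that still starts from a struct page: it now
	 * performs the folio lookup itself instead of going through the
	 * removed invalidate_inode_page() wrapper.
	 */
	static long page_evict_example(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* Previously: return invalidate_inode_page(page); */
		return mapping_evict_folio(folio_mapping(folio), folio);
	}

This is exactly the body of the removed wrapper: former
invalidate_inode_page() callers either do the page_folio()/folio_mapping()
lookup themselves, or, where they already hold a folio, call
mapping_evict_folio() directly.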