Diffstat (limited to 'mm/truncate.c')
| -rw-r--r-- | mm/truncate.c | 319 |
1 file changed, 198 insertions, 121 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index e99085bf3d34..12467c1bd711 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -23,31 +23,30 @@
 #include <linux/rmap.h>
 #include "internal.h"
 
-/*
- * Regular page slots are stabilized by the page lock even without the tree
- * itself locked.  These unlocked entries need verification under the tree
- * lock.
- */
-static inline void __clear_shadow_entry(struct address_space *mapping,
-				pgoff_t index, void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+		unsigned long start, unsigned long max)
 {
-	XA_STATE(xas, &mapping->i_pages, index);
+	XA_STATE(xas, &mapping->i_pages, start);
+	struct folio *folio;
 
-	xas_set_update(&xas, workingset_update_node);
-	if (xas_load(&xas) != entry)
+	/* Handled by shmem itself, or for DAX we do nothing. */
+	if (shmem_mapping(mapping) || dax_mapping(mapping))
 		return;
-	xas_store(&xas, NULL);
-}
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-			       void *entry)
-{
+	xas_set_update(&xas, workingset_update_node);
+
 	spin_lock(&mapping->host->i_lock);
-	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
-	xa_unlock_irq(&mapping->i_pages);
+	xas_lock_irq(&xas);
+
+	/* Clear all shadow entries from start to max */
+	xas_for_each(&xas, folio, max) {
+		if (xa_is_value(folio))
+			xas_store(&xas, NULL);
+	}
+
+	xas_unlock_irq(&xas);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 }
 
@@ -55,84 +54,67 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  * Unconditionally remove exceptional entries. Usually called from truncate
  * path. Note that the folio_batch may be altered by this function by removing
  * exceptional entries similar to what folio_batch_remove_exceptionals() does.
+ * Please note that indices[] has entries in ascending order as guaranteed by
+ * either find_get_entries() or find_lock_entries().
  */
 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 		struct folio_batch *fbatch, pgoff_t *indices)
 {
+	XA_STATE(xas, &mapping->i_pages, indices[0]);
+	int nr = folio_batch_count(fbatch);
+	struct folio *folio;
 	int i, j;
-	bool dax;
 
 	/* Handled by shmem itself */
 	if (shmem_mapping(mapping))
 		return;
 
-	for (j = 0; j < folio_batch_count(fbatch); j++)
+	for (j = 0; j < nr; j++)
 		if (xa_is_value(fbatch->folios[j]))
 			break;
 
-	if (j == folio_batch_count(fbatch))
+	if (j == nr)
 		return;
 
-	dax = dax_mapping(mapping);
-	if (!dax) {
-		spin_lock(&mapping->host->i_lock);
-		xa_lock_irq(&mapping->i_pages);
-	}
-
-	for (i = j; i < folio_batch_count(fbatch); i++) {
-		struct folio *folio = fbatch->folios[i];
-		pgoff_t index = indices[i];
-
-		if (!xa_is_value(folio)) {
-			fbatch->folios[j++] = folio;
-			continue;
+	if (dax_mapping(mapping)) {
+		for (i = j; i < nr; i++) {
+			if (xa_is_value(fbatch->folios[i])) {
+				/*
+				 * File systems should already have called
+				 * dax_break_layout_entry() to remove all DAX
+				 * entries while holding a lock to prevent
+				 * establishing new entries. Therefore we
+				 * shouldn't find any here.
+				 */
+				WARN_ON_ONCE(1);
+
+				/*
+				 * Delete the mapping so truncate_pagecache()
+				 * doesn't loop forever.
+				 */
+				dax_delete_mapping_entry(mapping, indices[i]);
+			}
 		}
+		goto out;
+	}
 
-		if (unlikely(dax)) {
-			dax_delete_mapping_entry(mapping, index);
-			continue;
-		}
+	xas_set(&xas, indices[j]);
+	xas_set_update(&xas, workingset_update_node);
 
-		__clear_shadow_entry(mapping, index, folio);
-	}
+	spin_lock(&mapping->host->i_lock);
+	xas_lock_irq(&xas);
 
-	if (!dax) {
-		xa_unlock_irq(&mapping->i_pages);
-		if (mapping_shrinkable(mapping))
-			inode_add_lru(mapping->host);
-		spin_unlock(&mapping->host->i_lock);
+	xas_for_each(&xas, folio, indices[nr-1]) {
+		if (xa_is_value(folio))
+			xas_store(&xas, NULL);
 	}
 
-	fbatch->nr = j;
-}
-
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
-					pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself, or for DAX we do nothing. */
-	if (shmem_mapping(mapping) || dax_mapping(mapping))
-		return 1;
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
-					 pgoff_t index, void *entry)
-{
-	/* Handled by shmem itself */
-	if (shmem_mapping(mapping))
-		return 1;
-	if (dax_mapping(mapping))
-		return dax_invalidate_mapping_entry_sync(mapping, index);
-	clear_shadow_entry(mapping, index, entry);
-	return 1;
+	xas_unlock_irq(&xas);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
+out:
+	folio_batch_remove_exceptionals(fbatch);
 }
 
 /**
@@ -174,7 +156,7 @@ static void truncate_cleanup_folio(struct folio *folio)
 	if (folio_mapped(folio))
 		unmap_mapping_folio(folio);
 
-	if (folio_has_private(folio))
+	if (folio_needs_release(folio))
 		folio_invalidate(folio, 0, folio_size(folio));
 
 	/*
@@ -183,7 +165,6 @@ static void truncate_cleanup_folio(struct folio *folio)
 	 * Hence dirty accounting check is placed after invalidation.
 	 */
 	folio_cancel_dirty(folio);
-	folio_clear_mappedtodisk(folio);
 }
 
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
@@ -196,6 +177,32 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 	return 0;
 }
 
+static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
+		unsigned long min_order)
+{
+	enum ttu_flags ttu_flags =
+		TTU_SYNC |
+		TTU_SPLIT_HUGE_PMD |
+		TTU_IGNORE_MLOCK;
+	int ret;
+
+	ret = try_folio_split_to_order(folio, split_at, min_order);
+
+	/*
+	 * If the split fails, unmap the folio, so it will be refaulted
+	 * with PTEs to respect SIGBUS semantics.
+	 *
+	 * Make an exception for shmem/tmpfs that for long time
+	 * intentionally mapped with PMDs across i_size.
+	 */
+	if (ret && !shmem_mapping(folio->mapping)) {
+		try_to_unmap(folio, ttu_flags);
+		WARN_ON(folio_mapped(folio));
+	}
+
+	return ret;
+}
+
 /*
  * Handle partial folios.  The folio may be entirely within the
  * range if a split has raced with us.  If not, we zero the part of the
@@ -210,20 +217,22 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 {
 	loff_t pos = folio_pos(folio);
+	size_t size = folio_size(folio);
 	unsigned int offset, length;
+	struct page *split_at, *split_at2;
+	unsigned int min_order;
 
 	if (pos < start)
 		offset = start - pos;
 	else
 		offset = 0;
-	length = folio_size(folio);
-	if (pos + length <= (u64)end)
-		length = length - offset;
+	if (pos + size <= (u64)end)
+		length = size - offset;
 	else
 		length = end + 1 - pos - offset;
 
 	folio_wait_writeback(folio);
-	if (length == folio_size(folio)) {
+	if (length == size) {
 		truncate_inode_folio(folio->mapping, folio);
 		return true;
 	}
@@ -233,14 +242,51 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 	 * doing a complex calculation here, and then doing the zeroing
 	 * anyway if the page split fails.
 	 */
-	folio_zero_range(folio, offset, length);
+	if (!mapping_inaccessible(folio->mapping))
+		folio_zero_range(folio, offset, length);
 
-	if (folio_has_private(folio))
+	if (folio_needs_release(folio))
 		folio_invalidate(folio, offset, length);
 	if (!folio_test_large(folio))
 		return true;
-	if (split_folio(folio) == 0)
+
+	min_order = mapping_min_folio_order(folio->mapping);
+	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
+	if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
+		/*
+		 * try to split at offset + length to make sure folios within
+		 * the range can be dropped, especially to avoid memory waste
+		 * for shmem truncate
+		 */
+		struct folio *folio2;
+
+		if (offset + length == size)
+			goto no_split;
+
+		split_at2 = folio_page(folio,
+				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+		folio2 = page_folio(split_at2);
+
+		if (!folio_try_get(folio2))
+			goto no_split;
+
+		if (!folio_test_large(folio2))
+			goto out;
+
+		if (!folio_trylock(folio2))
+			goto out;
+
+		/* make sure folio2 is large and does not change its mapping */
+		if (folio_test_large(folio2) &&
+		    folio2->mapping == folio->mapping)
+			try_folio_split_or_unmap(folio2, split_at2, min_order);
+
+		folio_unlock(folio2);
+out:
+		folio_put(folio2);
+no_split:
 		return true;
+	}
 	if (folio_test_dirty(folio))
 		return false;
 	truncate_inode_folio(folio->mapping, folio);
@@ -404,7 +450,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
-			/* We rely upon deletion not changing page->index */
+			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio))
 				continue;
 
@@ -497,14 +543,17 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 	folio_batch_init(&fbatch);
 	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
-		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+		bool xa_has_values = false;
+		int nr = folio_batch_count(&fbatch);
+
+		for (i = 0; i < nr; i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio)) {
-				count += invalidate_exceptional_entry(mapping,
-							     indices[i], folio);
+				xa_has_values = true;
+				count++;
 				continue;
 			}
 
			ret = mapping_evict_folio(mapping, folio);
			folio_unlock(folio);
@@ -522,6 +571,10 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 			}
 			count += ret;
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -550,21 +603,40 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+	if (!folio_test_dirty(folio))
+		return 0;
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+		return 0;
+	return mapping->a_ops->launder_folio(folio);
+}
+
 /*
  * This is like mapping_evict_folio(), except it ignores the folio's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave folios behind because
- * shrink_page_list() has a temp ref on them, or because they're transiently
+ * shrink_folio_list() has a temp ref on them, or because they're transiently
  * sitting in the folio_add_lru() caches.
 */
-static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
 {
-	if (folio->mapping != mapping)
-		return 0;
+	int ret;
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
-		return 0;
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (ret)
+		return ret;
+	if (folio->mapping != mapping)
+		return -EBUSY;
+	if (!filemap_release_folio(folio, gfp))
+		return -EBUSY;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
@@ -575,7 +647,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 	filemap_free_folio(mapping, folio);
-	return 1;
+	return 0;
@@ -583,16 +655,7 @@
 failed:
 	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&mapping->host->i_lock);
-	return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
-	if (!folio_test_dirty(folio))
-		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
-		return 0;
-	return mapping->a_ops->launder_folio(folio);
+	return -EBUSY;
 }
 
 /**
@@ -623,14 +686,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	folio_batch_init(&fbatch);
 	index = start;
 	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
-		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+		bool xa_has_values = false;
+		int nr = folio_batch_count(&fbatch);
+
+		for (i = 0; i < nr; i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio)) {
-				if (!invalidate_exceptional_entry2(mapping,
-						indices[i], folio))
+				xa_has_values = true;
+				if (dax_mapping(mapping) &&
+				    !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
 					ret = -EBUSY;
 				continue;
 			}
 
@@ -652,20 +719,15 @@
 			}
 			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);
 		}
+
+		if (xa_has_values)
+			clear_shadow_entries(mapping, indices[0], indices[nr-1]);
+
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
@@ -802,6 +864,21 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 	 */
 	if (folio_mkclean(folio))
 		folio_mark_dirty(folio);
+
+	/*
+	 * The post-eof range of the folio must be zeroed before it is exposed
+	 * to the file. Writeback normally does this, but since i_size has been
+	 * increased we handle it here.
+	 */
+	if (folio_test_dirty(folio)) {
+		unsigned int offset, end;
+
+		offset = from - folio_pos(folio);
+		end = min_t(unsigned int, to - folio_pos(folio),
+			    folio_size(folio));
+		folio_zero_segment(folio, offset, end);
+	}
+
 	folio_unlock(folio);
 	folio_put(folio);
 }
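Pieced together from the hunks above, the batched shadow-entry handling gives mapping_try_invalidate() a loop of roughly the following shape. This is an illustrative C sketch, not a verbatim copy of the function: the wrapper name try_invalidate_sketch and the trimmed-down locals are invented for the example, and the real function's failure accounting is omitted.

/*
 * Sketch: value (shadow) entries are only noted inside the loop; they are
 * removed in one pass per batch by clear_shadow_entries(), which takes the
 * i_pages lock once for the whole [indices[0], indices[nr-1]] span instead
 * of once per entry.
 */
static unsigned long try_invalidate_sketch(struct address_space *mapping,
					   pgoff_t index, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	unsigned long count = 0;
	int i;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
		bool xa_has_values = false;
		int nr = folio_batch_count(&fbatch);

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				/* Defer removal to the batched clear below. */
				xa_has_values = true;
				count++;
				continue;
			}
			count += mapping_evict_folio(mapping, folio);
			folio_unlock(folio);
		}

		/* One locked xarray walk for the whole batch. */
		if (xa_has_values)
			clear_shadow_entries(mapping, indices[0], indices[nr-1]);

		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return count;
}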
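The other structural change is folio_unmap_invalidate(), which folds the old unmap + launder + invalidate_complete_folio2() sequence into one helper that returns 0 on success, a launder_folio error, or -EBUSY if the folio could not be released. A minimal caller sketch follows, assuming the caller holds its own reference on the folio; the wrapper name evict_one is hypothetical, while the lock/writeback ordering mirrors invalidate_inode_pages2_range() above.

/* Unmap, launder (if dirty) and drop one folio from the page cache. */
static int evict_one(struct address_space *mapping, struct folio *folio)
{
	int ret;

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* Raced with truncation or reclaim; nothing left to do. */
		folio_unlock(folio);
		return 0;
	}
	folio_wait_writeback(folio);
	ret = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
	/* Safe even on success: the caller's reference keeps the folio alive. */
	folio_unlock(folio);
	return ret;
}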
