Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c  458
1 files changed, 261 insertions, 197 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index 7b4ea4c4a46b..12467c1bd711 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -19,36 +19,34 @@
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/buffer_head.h> /* grr. try_to_release_page */
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"
-/*
- * Regular page slots are stabilized by the page lock even without the tree
- * itself locked. These unlocked entries need verification under the tree
- * lock.
- */
-static inline void __clear_shadow_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
+static void clear_shadow_entries(struct address_space *mapping,
+ unsigned long start, unsigned long max)
{
- XA_STATE(xas, &mapping->i_pages, index);
+ XA_STATE(xas, &mapping->i_pages, start);
+ struct folio *folio;
- xas_set_update(&xas, workingset_update_node);
- if (xas_load(&xas) != entry)
+ /* Handled by shmem itself, or for DAX we do nothing. */
+ if (shmem_mapping(mapping) || dax_mapping(mapping))
return;
- xas_store(&xas, NULL);
-}
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
- void *entry)
-{
+ xas_set_update(&xas, workingset_update_node);
+
spin_lock(&mapping->host->i_lock);
- xa_lock_irq(&mapping->i_pages);
- __clear_shadow_entry(mapping, index, entry);
- xa_unlock_irq(&mapping->i_pages);
+ xas_lock_irq(&xas);
+
+ /* Clear all shadow entries from start to max */
+ xas_for_each(&xas, folio, max) {
+ if (xa_is_value(folio))
+ xas_store(&xas, NULL);
+ }
+
+ xas_unlock_irq(&xas);
if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
}
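
For context: shadow slots in mapping->i_pages are XArray "value" entries rather than folio pointers, which is why the loop above can tell them apart with xa_is_value(). A minimal illustration of that distinction follows (not part of this patch; the function name is made up, and it assumes <linux/xarray.h>, which this file already pulls in):

static void shadow_value_entry_demo(struct xarray *xa)
{
	void *entry;

	/* Store a tagged integer, the same representation shadow entries use. */
	xa_store(xa, 0, xa_mk_value(42), GFP_KERNEL);

	entry = xa_load(xa, 0);
	WARN_ON(!xa_is_value(entry));		/* a value entry, not a folio */
	WARN_ON(xa_to_value(entry) != 42);

	xa_erase(xa, 0);			/* equivalent to storing NULL */
}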
@@ -56,84 +54,67 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
* Unconditionally remove exceptional entries. Usually called from truncate
* path. Note that the folio_batch may be altered by this function by removing
* exceptional entries similar to what folio_batch_remove_exceptionals() does.
+ * Please note that indices[] has entries in ascending order as guaranteed by
+ * either find_get_entries() or find_lock_entries().
*/
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
struct folio_batch *fbatch, pgoff_t *indices)
{
+ XA_STATE(xas, &mapping->i_pages, indices[0]);
+ int nr = folio_batch_count(fbatch);
+ struct folio *folio;
int i, j;
- bool dax;
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
- for (j = 0; j < folio_batch_count(fbatch); j++)
+ for (j = 0; j < nr; j++)
if (xa_is_value(fbatch->folios[j]))
break;
- if (j == folio_batch_count(fbatch))
+ if (j == nr)
return;
- dax = dax_mapping(mapping);
- if (!dax) {
- spin_lock(&mapping->host->i_lock);
- xa_lock_irq(&mapping->i_pages);
- }
-
- for (i = j; i < folio_batch_count(fbatch); i++) {
- struct folio *folio = fbatch->folios[i];
- pgoff_t index = indices[i];
+ if (dax_mapping(mapping)) {
+ for (i = j; i < nr; i++) {
+ if (xa_is_value(fbatch->folios[i])) {
+ /*
+ * File systems should already have called
+ * dax_break_layout_entry() to remove all DAX
+ * entries while holding a lock to prevent
+ * establishing new entries. Therefore we
+ * shouldn't find any here.
+ */
+ WARN_ON_ONCE(1);
- if (!xa_is_value(folio)) {
- fbatch->folios[j++] = folio;
- continue;
+ /*
+ * Delete the mapping so truncate_pagecache()
+ * doesn't loop forever.
+ */
+ dax_delete_mapping_entry(mapping, indices[i]);
+ }
}
+ goto out;
+ }
- if (unlikely(dax)) {
- dax_delete_mapping_entry(mapping, index);
- continue;
- }
+ xas_set(&xas, indices[j]);
+ xas_set_update(&xas, workingset_update_node);
- __clear_shadow_entry(mapping, index, folio);
- }
+ spin_lock(&mapping->host->i_lock);
+ xas_lock_irq(&xas);
- if (!dax) {
- xa_unlock_irq(&mapping->i_pages);
- if (mapping_shrinkable(mapping))
- inode_add_lru(mapping->host);
- spin_unlock(&mapping->host->i_lock);
+ xas_for_each(&xas, folio, indices[nr-1]) {
+ if (xa_is_value(folio))
+ xas_store(&xas, NULL);
}
- fbatch->nr = j;
-}
-/*
- * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages().
- */
-static int invalidate_exceptional_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- /* Handled by shmem itself, or for DAX we do nothing. */
- if (shmem_mapping(mapping) || dax_mapping(mapping))
- return 1;
- clear_shadow_entry(mapping, index, entry);
- return 1;
-}
-
-/*
- * Invalidate exceptional entry if clean. This handles exceptional entries for
- * invalidate_inode_pages2() so for DAX it evicts only clean entries.
- */
-static int invalidate_exceptional_entry2(struct address_space *mapping,
- pgoff_t index, void *entry)
-{
- /* Handled by shmem itself */
- if (shmem_mapping(mapping))
- return 1;
- if (dax_mapping(mapping))
- return dax_invalidate_mapping_entry_sync(mapping, index);
- clear_shadow_entry(mapping, index, entry);
- return 1;
+ xas_unlock_irq(&xas);
+ if (mapping_shrinkable(mapping))
+	inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
+out:
+ folio_batch_remove_exceptionals(fbatch);
}
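
A hedged sketch of the batched caller pattern this helper is built for, condensed from the truncate_inode_pages_range() loop further down in this file (the helpers named are real; the wrapper itself is illustrative):

static void truncate_range_sketch(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index = start;
	int i;

	folio_batch_init(&fbatch);
	while (index < end &&
	       find_lock_entries(mapping, &index, end - 1, &fbatch, indices)) {
		/* Drop value entries from the batch and from the XArray. */
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			truncate_cleanup_folio(fbatch.folios[i]);
		delete_from_page_cache_batch(mapping, &fbatch);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_unlock(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}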
/**
@@ -175,7 +156,7 @@ static void truncate_cleanup_folio(struct folio *folio)
if (folio_mapped(folio))
unmap_mapping_folio(folio);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, 0, folio_size(folio));
/*
@@ -184,7 +165,6 @@ static void truncate_cleanup_folio(struct folio *folio)
* Hence dirty accounting check is placed after invalidation.
*/
folio_cancel_dirty(folio);
- folio_clear_mappedtodisk(folio);
}
int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
@@ -197,6 +177,32 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
return 0;
}
+static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
+ unsigned long min_order)
+{
+ enum ttu_flags ttu_flags =
+ TTU_SYNC |
+ TTU_SPLIT_HUGE_PMD |
+ TTU_IGNORE_MLOCK;
+ int ret;
+
+ ret = try_folio_split_to_order(folio, split_at, min_order);
+
+ /*
+ * If the split fails, unmap the folio, so it will be refaulted
+ * with PTEs to respect SIGBUS semantics.
+ *
+	 * Make an exception for shmem/tmpfs, which has long intentionally
+	 * mapped PMDs across i_size.
+ */
+ if (ret && !shmem_mapping(folio->mapping)) {
+ try_to_unmap(folio, ttu_flags);
+ WARN_ON(folio_mapped(folio));
+ }
+
+ return ret;
+}
+
/*
* Handle partial folios. The folio may be entirely within the
* range if a split has raced with us. If not, we zero the part of the
@@ -211,20 +217,22 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
loff_t pos = folio_pos(folio);
+ size_t size = folio_size(folio);
unsigned int offset, length;
+ struct page *split_at, *split_at2;
+ unsigned int min_order;
if (pos < start)
offset = start - pos;
else
offset = 0;
- length = folio_size(folio);
- if (pos + length <= (u64)end)
- length = length - offset;
+ if (pos + size <= (u64)end)
+ length = size - offset;
else
length = end + 1 - pos - offset;
folio_wait_writeback(folio);
- if (length == folio_size(folio)) {
+ if (length == size) {
truncate_inode_folio(folio->mapping, folio);
return true;
}
@@ -234,14 +242,51 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
* doing a complex calculation here, and then doing the zeroing
* anyway if the page split fails.
*/
- folio_zero_range(folio, offset, length);
+ if (!mapping_inaccessible(folio->mapping))
+ folio_zero_range(folio, offset, length);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
- if (split_folio(folio) == 0)
+
+ min_order = mapping_min_folio_order(folio->mapping);
+ split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
+ if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
+ /*
+ * try to split at offset + length to make sure folios within
+ * the range can be dropped, especially to avoid memory waste
+ * for shmem truncate
+ */
+ struct folio *folio2;
+
+ if (offset + length == size)
+ goto no_split;
+
+ split_at2 = folio_page(folio,
+ PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+ folio2 = page_folio(split_at2);
+
+ if (!folio_try_get(folio2))
+ goto no_split;
+
+ if (!folio_test_large(folio2))
+ goto out;
+
+ if (!folio_trylock(folio2))
+ goto out;
+
+ /* make sure folio2 is large and does not change its mapping */
+ if (folio_test_large(folio2) &&
+ folio2->mapping == folio->mapping)
+ try_folio_split_or_unmap(folio2, split_at2, min_order);
+
+ folio_unlock(folio2);
+out:
+ folio_put(folio2);
+no_split:
return true;
+ }
if (folio_test_dirty(folio))
return false;
truncate_inode_folio(folio->mapping, folio);
@@ -251,10 +296,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
/*
* Used to get rid of pages on hardware memory corruption.
*/
-int generic_error_remove_page(struct address_space *mapping, struct page *page)
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
if (!mapping)
return -EINVAL;
/*
@@ -263,47 +307,39 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
*/
if (!S_ISREG(mapping->host->i_mode))
return -EIO;
- return truncate_inode_folio(mapping, page_folio(page));
+ return truncate_inode_folio(mapping, folio);
}
-EXPORT_SYMBOL(generic_error_remove_page);
+EXPORT_SYMBOL(generic_error_remove_folio);
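
Usage note: with the rename, filesystems wire this helper up through the error_remove_folio address_space operation; a minimal illustrative snippet (the aops struct below is made up, real instances live in the individual filesystems):

static const struct address_space_operations example_aops = {
	.error_remove_folio	= generic_error_remove_folio,
};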
-static long mapping_evict_folio(struct address_space *mapping,
- struct folio *folio)
+/**
+ * mapping_evict_folio() - Remove an unused folio from the page-cache.
+ * @mapping: The mapping this folio belongs to.
+ * @folio: The folio to remove.
+ *
+ * Safely remove one folio from the page cache.
+ * It only drops clean, unused folios.
+ *
+ * Context: Folio must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
+ /* The page may have been truncated before it was locked */
+ if (!mapping)
+ return 0;
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
/* The refcount will be elevated if any page in the folio is mapped */
if (folio_ref_count(folio) >
folio_nr_pages(folio) + folio_has_private(folio) + 1)
return 0;
- if (folio_has_private(folio) && !filemap_release_folio(folio, 0))
+ if (!filemap_release_folio(folio, 0))
return 0;
return remove_mapping(mapping, folio);
}
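
A hedged usage sketch for the now-exported mapping_evict_folio(), loosely modelled on its callers (e.g. the soft-offline path in mm/memory-failure.c); the wrapper name is illustrative:

static long try_evict_one_folio(struct folio *folio)
{
	long nr;

	if (!folio_trylock(folio))
		return 0;
	/* folio_mapping() is NULL if the folio was truncated meanwhile. */
	nr = mapping_evict_folio(folio_mapping(folio), folio);
	folio_unlock(folio);
	return nr;
}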
/**
- * invalidate_inode_page() - Remove an unused page from the pagecache.
- * @page: The page to remove.
- *
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages.
- *
- * Context: Page must be locked.
- * Return: The number of pages successfully removed.
- */
-long invalidate_inode_page(struct page *page)
-{
- struct folio *folio = page_folio(page);
- struct address_space *mapping = folio_mapping(folio);
-
- /* The page may have been truncated before it was locked */
- if (!mapping)
- return 0;
- return mapping_evict_folio(mapping, folio);
-}
-
-/**
* truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
@@ -328,7 +364,7 @@ long invalidate_inode_page(struct page *page)
* page aligned properly.
*/
void truncate_inode_pages_range(struct address_space *mapping,
		loff_t lstart, loff_t lend)
{
pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */
@@ -375,10 +411,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
- if (folio) {
+	if (!IS_ERR(folio)) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
- start = folio->index + folio_nr_pages(folio);
+ start = folio_next_index(folio);
if (same_folio)
end = folio->index;
}
@@ -387,14 +423,15 @@ void truncate_inode_pages_range(struct address_space *mapping,
folio = NULL;
}
- if (!same_folio)
+ if (!same_folio) {
folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
FGP_LOCK, 0);
- if (folio) {
- if (!truncate_inode_partial_folio(folio, lstart, lend))
- end = folio->index;
- folio_unlock(folio);
- folio_put(folio);
+ if (!IS_ERR(folio)) {
+ if (!truncate_inode_partial_folio(folio, lstart, lend))
+ end = folio->index;
+ folio_unlock(folio);
+ folio_put(folio);
+ }
}
index = start;
@@ -413,7 +450,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
- /* We rely upon deletion not changing page->index */
+ /* We rely upon deletion not changing folio->index */
if (xa_is_value(folio))
continue;
@@ -485,18 +522,17 @@ void truncate_inode_pages_final(struct address_space *mapping)
EXPORT_SYMBOL(truncate_inode_pages_final);
/**
- * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
+ * mapping_try_invalidate - Invalidate all the evictable folios of one inode
+ * @mapping: the address_space which holds the folios to invalidate
* @start: the offset 'from' which to invalidate
* @end: the offset 'to' which to invalidate (inclusive)
- * @nr_pagevec: invalidate failed page number for caller
+ * @nr_failed: How many folio invalidations failed
*
- * This helper is similar to invalidate_mapping_pages(), except that it accounts
- * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
- * will be used by the caller.
+ * This function is similar to invalidate_mapping_pages(), except that it
+ * returns the number of folios which could not be evicted in @nr_failed.
*/
-unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
- pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
+unsigned long mapping_try_invalidate(struct address_space *mapping,
+ pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
pgoff_t indices[PAGEVEC_SIZE];
struct folio_batch fbatch;
@@ -507,14 +543,17 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
folio_batch_init(&fbatch);
while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ bool xa_has_values = false;
+ int nr = folio_batch_count(&fbatch);
+
+ for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
/* We rely upon deletion not changing folio->index */
if (xa_is_value(folio)) {
- count += invalidate_exceptional_entry(mapping,
- indices[i], folio);
+ xa_has_values = true;
+ count++;
continue;
}
@@ -526,12 +565,16 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
*/
if (!ret) {
deactivate_file_folio(folio);
- /* It is likely on the pagevec of a remote CPU */
- if (nr_pagevec)
- (*nr_pagevec)++;
+ /* Likely in the lru cache of a remote CPU */
+ if (nr_failed)
+ (*nr_failed)++;
}
count += ret;
}
+
+ if (xa_has_values)
+ clear_shadow_entries(mapping, indices[0], indices[nr-1]);
+
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
cond_resched();
@@ -551,31 +594,49 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
* If you want to remove all the pages of one inode, regardless of
* their use and writeback state, use truncate_inode_pages().
*
- * Return: the number of the cache entries that were invalidated
+ * Return: The number of indices that had their contents invalidated
*/
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
- return invalidate_mapping_pagevec(mapping, start, end, NULL);
+ return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
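
Illustrative usage (mirrors fs/drop_caches.c): passing -1 as the end index invalidates up to the last possible index, and the return value counts indices whose contents were dropped:

static unsigned long drop_clean_cache(struct inode *inode)
{
	/* (pgoff_t)-1 means "up to the last possible page index". */
	return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}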
+static int folio_launder(struct address_space *mapping, struct folio *folio)
+{
+ if (!folio_test_dirty(folio))
+ return 0;
+ if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
+ return 0;
+ return mapping->a_ops->launder_folio(folio);
+}
+
/*
- * This is like invalidate_inode_page(), except it ignores the page's
+ * This is like mapping_evict_folio(), except it ignores the folio's
* refcount. We do this because invalidate_inode_pages2() needs stronger
- * invalidation guarantees, and cannot afford to leave pages behind because
- * shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the folio_add_lru() pagevecs.
+ * invalidation guarantees, and cannot afford to leave folios behind because
+ * shrink_folio_list() has a temp ref on them, or because they're transiently
+ * sitting in the folio_add_lru() caches.
*/
-static int invalidate_complete_folio2(struct address_space *mapping,
- struct folio *folio)
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+ gfp_t gfp)
{
- if (folio->mapping != mapping)
- return 0;
+ int ret;
- if (folio_has_private(folio) &&
- !filemap_release_folio(folio, GFP_KERNEL))
- return 0;
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+ if (folio_mapped(folio))
+ unmap_mapping_folio(folio);
+ BUG_ON(folio_mapped(folio));
+
+ ret = folio_launder(mapping, folio);
+ if (ret)
+ return ret;
+ if (folio->mapping != mapping)
+ return -EBUSY;
+ if (!filemap_release_folio(folio, gfp))
+ return -EBUSY;
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
@@ -586,7 +647,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
__filemap_remove_folio(folio, NULL);
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
filemap_free_folio(mapping, folio);
@@ -594,16 +655,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
failed:
xa_unlock_irq(&mapping->i_pages);
spin_unlock(&mapping->host->i_lock);
- return 0;
-}
-
-static int folio_launder(struct address_space *mapping, struct folio *folio)
-{
- if (!folio_test_dirty(folio))
- return 0;
- if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
- return 0;
- return mapping->a_ops->launder_folio(folio);
+ return -EBUSY;
}
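
A hedged usage sketch for the newly exported folio_unmap_invalidate(), mirroring the call site in invalidate_inode_pages2_range() below (the wrapper is illustrative):

static int invalidate_one_locked(struct address_space *mapping,
				 struct folio *folio)
{
	int err;

	folio_lock(folio);
	if (folio->mapping != mapping) {	/* truncated under us */
		folio_unlock(folio);
		return 0;
	}
	folio_wait_writeback(folio);
	err = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
	folio_unlock(folio);
	return err;	/* 0 on success, -EBUSY if the folio could not be freed */
}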
/**
@@ -634,14 +686,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
folio_batch_init(&fbatch);
index = start;
while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ bool xa_has_values = false;
+ int nr = folio_batch_count(&fbatch);
+
+ for (i = 0; i < nr; i++) {
struct folio *folio = fbatch.folios[i];
/* We rely upon deletion not changing folio->index */
if (xa_is_value(folio)) {
- if (!invalidate_exceptional_entry2(mapping,
- indices[i], folio))
+ xa_has_values = true;
+ if (dax_mapping(mapping) &&
+ !dax_invalidate_mapping_entry_sync(mapping, indices[i]))
ret = -EBUSY;
continue;
}
@@ -657,26 +713,21 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
folio_lock(folio);
- VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
- if (folio->mapping != mapping) {
+ if (unlikely(folio->mapping != mapping)) {
folio_unlock(folio);
continue;
}
+ VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
folio_wait_writeback(folio);
-
- if (folio_mapped(folio))
- unmap_mapping_folio(folio);
- BUG_ON(folio_mapped(folio));
-
- ret2 = folio_launder(mapping, folio);
- if (ret2 == 0) {
- if (!invalidate_complete_folio2(mapping, folio))
- ret2 = -EBUSY;
- }
+ ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
if (ret2 < 0)
ret = ret2;
folio_unlock(folio);
}
+
+ if (xa_has_values)
+ clear_shadow_entries(mapping, indices[0], indices[nr-1]);
+
folio_batch_remove_exceptionals(&fbatch);
folio_batch_release(&fbatch);
cond_resched();
@@ -775,15 +826,15 @@ EXPORT_SYMBOL(truncate_setsize);
* @from: original inode size
* @to: new inode size
*
- * Handle extension of inode size either caused by extending truncate or by
- * write starting after current i_size. We mark the page straddling current
- * i_size RO so that page_mkwrite() is called on the nearest write access to
- * the page. This way filesystem can be sure that page_mkwrite() is called on
- * the page before user writes to the page via mmap after the i_size has been
- * changed.
+ * Handle extension of inode size either caused by extending truncate or
+ * by write starting after current i_size. We mark the page straddling
+ * current i_size RO so that page_mkwrite() is called on the first
+ * write access to the page. The filesystem will update its per-block
+ * information before user writes to the page via mmap after the i_size
+ * has been changed.
*
* The function must be called after i_size is updated so that page fault
- * coming after we unlock the page will already see the new i_size.
+ * coming after we unlock the folio will already see the new i_size.
* The function must be called while we still hold i_rwsem - this not only
* makes sure i_size is stable but also that userspace cannot observe new
* i_size value before we are prepared to store mmap writes at new inode size.
@@ -792,31 +843,44 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
int bsize = i_blocksize(inode);
loff_t rounded_from;
- struct page *page;
- pgoff_t index;
+ struct folio *folio;
WARN_ON(to > inode->i_size);
- if (from >= to || bsize == PAGE_SIZE)
+ if (from >= to || bsize >= PAGE_SIZE)
return;
/* Page straddling @from will not have any hole block created? */
rounded_from = round_up(from, bsize);
if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
return;
- index = from >> PAGE_SHIFT;
- page = find_lock_page(inode->i_mapping, index);
- /* Page not cached? Nothing to do */
- if (!page)
+ folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
+ /* Folio not cached? Nothing to do */
+ if (IS_ERR(folio))
return;
/*
- * See clear_page_dirty_for_io() for details why set_page_dirty()
+ * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
* is needed.
*/
- if (page_mkclean(page))
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+
+ /*
+ * The post-eof range of the folio must be zeroed before it is exposed
+ * to the file. Writeback normally does this, but since i_size has been
+ * increased we handle it here.
+ */
+ if (folio_test_dirty(folio)) {
+ unsigned int offset, end;
+
+ offset = from - folio_pos(folio);
+ end = min_t(unsigned int, to - folio_pos(folio),
+ folio_size(folio));
+ folio_zero_segment(folio, offset, end);
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
}
EXPORT_SYMBOL(pagecache_isize_extended);
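
The rounding checks above decide whether the page straddling the old i_size can gain unallocated (hole) blocks below the new EOF; a small illustrative predicate with made-up naming restates them. Example: with 1k blocks and 4k pages, growing i_size from 5000 to 9000 leaves bytes 5120..8191 of the straddling page backed by blocks that may not exist yet, so that page must be write-protected.

static bool straddling_page_needs_mkclean(loff_t from, loff_t to, int bsize)
{
	loff_t rounded_from = round_up(from, bsize);

	if (from >= to || bsize >= PAGE_SIZE)
		return false;
	/* Does the page straddling @from gain hole blocks below the new EOF? */
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return false;
	return true;
}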