Diffstat (limited to 'fs/buffer.c'):
 fs/buffer.c | 306
 1 file changed, 160 insertions(+), 146 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index d3bcf601d3e5..cc8452f60251 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -55,7 +55,7 @@
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
- struct writeback_control *wbc);
+ enum rw_hint hint, struct writeback_control *wbc);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -189,8 +189,8 @@ EXPORT_SYMBOL(end_buffer_write_sync);
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
- struct inode *bd_inode = bdev->bd_inode;
- struct address_space *bd_mapping = bd_inode->i_mapping;
+ struct address_space *bd_mapping = bdev->bd_mapping;
+ const int blkbits = bd_mapping->host->i_blkbits;
struct buffer_head *ret = NULL;
pgoff_t index;
struct buffer_head *bh;
@@ -199,7 +199,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
int all_mapped = 1;
static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
- index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
+ index = ((loff_t)block << blkbits) / PAGE_SIZE;
folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
if (IS_ERR(folio))
goto out;
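
Worked example, not part of the patch, of the index arithmetic used above: with 512-byte blocks (blkbits = 9) and 4 KiB pages, block 100 maps to page-cache index (100 << 9) / 4096 = 12.

static pgoff_t example_block_to_index(sector_t block, unsigned int blkbits)
{
	/* Same calculation as __find_get_block_slow() above. */
	return ((loff_t)block << blkbits) / PAGE_SIZE;
}
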
@@ -233,7 +233,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
(unsigned long long)block,
(unsigned long long)bh->b_blocknr,
bh->b_state, bh->b_size, bdev,
- 1 << bd_inode->i_blkbits);
+ 1 << blkbits);
}
out_unlock:
spin_unlock(&bd_mapping->i_private_lock);
@@ -258,7 +258,6 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
} else {
clear_buffer_uptodate(bh);
buffer_io_error(bh, ", async page read");
- folio_set_error(folio);
}
/*
@@ -391,7 +390,6 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
buffer_io_error(bh, ", lost async page write");
mark_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
- folio_set_error(folio);
}
first = folio_buffers(folio);
@@ -464,7 +462,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
* a successful fsync(). For example, ext2 indirect blocks need to be
* written back and waited upon before fsync() returns.
*
- * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
+ * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
* inode_has_buffers() and invalidate_inode_buffers() are provided for the
* management of a list of dependent buffers at ->i_mapping->i_private_list.
*
@@ -687,30 +685,37 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
-/*
- * Add a page to the dirty page list.
- *
- * It is a sad fact of life that this function is called from several places
- * deeply under spinlocking. It may not sleep.
- *
- * If the page has buffers, the uptodate buffers are set dirty, to preserve
- * dirty-state coherency between the page and the buffers. It the page does
- * not have buffers then when they are later attached they will all be set
- * dirty.
- *
- * The buffers are dirtied before the page is dirtied. There's a small race
- * window in which a writepage caller may see the page cleanness but not the
- * buffer dirtiness. That's fine. If this code were to set the page dirty
- * before the buffers, a concurrent writepage caller could clear the page dirty
- * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
- * page on the dirty page list.
- *
- * We use i_private_lock to lock against try_to_free_buffers while using the
- * page's buffer list. Also use this to protect against clean buffers being
- * added to the page after it was set dirty.
- *
- * FIXME: may need to call ->reservepage here as well. That's rather up to the
- * address_space though.
+/**
+ * block_dirty_folio - Mark a folio as dirty.
+ * @mapping: The address space containing this folio.
+ * @folio: The folio to mark dirty.
+ *
+ * Filesystems which use buffer_heads can use this function as their
+ * ->dirty_folio implementation. Some filesystems need to do a little
+ * work before calling this function. Filesystems which do not use
+ * buffer_heads should call filemap_dirty_folio() instead.
+ *
+ * If the folio has buffers, the uptodate buffers are set dirty, to
+ * preserve dirty-state coherency between the folio and the buffers.
+ * Buffers added to a dirty folio are created dirty.
+ *
+ * The buffers are dirtied before the folio is dirtied. There's a small
+ * race window in which writeback may see the folio cleanness but not the
+ * buffer dirtiness. That's fine. If this code were to set the folio
+ * dirty before the buffers, writeback could clear the folio dirty flag,
+ * see a bunch of clean buffers and we'd end up with dirty buffers/clean
+ * folio on the dirty folio list.
+ *
+ * We use i_private_lock to lock against try_to_free_buffers() while
+ * using the folio's buffer list. This also prevents clean buffers
+ * being added to the folio after it was set dirty.
+ *
+ * Context: May only be called from process context. Does not sleep.
+ * Caller must ensure that @folio cannot be truncated during this call,
+ * typically by holding the folio lock or having a page in the folio
+ * mapped and holding the page table lock.
+ *
+ * Return: True if the folio was dirtied; false if it was already dirtied.
*/
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
@@ -731,15 +736,12 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
* Lock out page's memcg migration to keep PageDirty
* synchronized with per-memcg dirty page counters.
*/
- folio_memcg_lock(folio);
newly_dirty = !folio_test_set_dirty(folio);
spin_unlock(&mapping->i_private_lock);
if (newly_dirty)
__folio_mark_dirty(folio, mapping, 1);
- folio_memcg_unlock(folio);
-
if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
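
Illustrative sketch, not part of this patch: a buffer_head-based filesystem typically wires block_dirty_folio() into its address_space_operations as the new kernel-doc describes. example_get_block is a toy 1:1 mapper standing in for a real block-mapping routine.

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh, int create)
{
	/* Toy 1:1 block mapping, purely for illustration. */
	map_bh(bh, inode->i_sb, iblock);
	return 0;
}

static int example_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, example_get_block);
}

static const struct address_space_operations example_aops = {
	.read_folio	  = example_read_folio,
	.dirty_folio	  = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
};
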
@@ -769,12 +771,11 @@ EXPORT_SYMBOL(block_dirty_folio);
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
- struct list_head tmp;
struct address_space *mapping;
int err = 0, err2;
struct blk_plug plug;
+ LIST_HEAD(tmp);
- INIT_LIST_HEAD(&tmp);
blk_start_plug(&plug);
spin_lock(lock);
@@ -851,8 +852,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* done a sync(). Just drop the buffers from the inode list.
*
* NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
- * assumes that all the buffers are against the blockdev. Not true
- * for reiserfs.
+ * assumes that all the buffers are against the blockdev.
*/
void invalidate_inode_buffers(struct inode *inode)
{
@@ -953,12 +953,9 @@ no_grow:
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry)
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
{
gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
- if (retry)
- gfp |= __GFP_NOFAIL;
return folio_alloc_buffers(page_folio(page), size, gfp);
}
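
Illustrative sketch, not part of this patch: with the retry argument gone, a caller that previously passed retry == true expresses the same guarantee by handing __GFP_NOFAIL to folio_alloc_buffers() itself.

static struct buffer_head *example_alloc_buffers_nofail(struct folio *folio,
							unsigned long size)
{
	/* Equivalent of the old alloc_page_buffers(page, size, true). */
	return folio_alloc_buffers(folio, size,
				   GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL);
}
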
@@ -1034,12 +1031,12 @@ static sector_t folio_init_buffers(struct folio *folio,
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
pgoff_t index, unsigned size, gfp_t gfp)
{
- struct inode *inode = bdev->bd_inode;
+ struct address_space *mapping = bdev->bd_mapping;
struct folio *folio;
struct buffer_head *bh;
sector_t end_block = 0;
- folio = __filemap_get_folio(inode->i_mapping, index,
+ folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
return false;
@@ -1073,10 +1070,10 @@ static bool grow_dev_folio(struct block_device *bdev, sector_t block,
* lock to be atomic wrt __find_get_block(), which does not
* run under the folio lock.
*/
- spin_lock(&inode->i_mapping->i_private_lock);
+ spin_lock(&mapping->i_private_lock);
link_dev_buffers(folio, bh);
end_block = folio_init_buffers(folio, bdev, size);
- spin_unlock(&inode->i_mapping->i_private_lock);
+ spin_unlock(&mapping->i_private_lock);
unlock:
folio_unlock(folio);
folio_put(folio);
@@ -1193,13 +1190,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
struct folio *folio = bh->b_folio;
struct address_space *mapping = NULL;
- folio_memcg_lock(folio);
if (!folio_test_set_dirty(folio)) {
mapping = folio->mapping;
if (mapping)
__folio_mark_dirty(folio, mapping, 0);
}
- folio_memcg_unlock(folio);
if (mapping)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
@@ -1219,26 +1214,28 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
-/*
- * Decrement a buffer_head's reference count. If all buffers against a page
- * have zero reference count, are clean and unlocked, and if the page is clean
- * and unlocked then try_to_free_buffers() may strip the buffers from the page
- * in preparation for freeing it (sometimes, rarely, buffers are removed from
- * a page but it ends up not being freed, and buffers may later be reattached).
+/**
+ * __brelse - Release a buffer.
+ * @bh: The buffer to release.
+ *
+ * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
*/
-void __brelse(struct buffer_head * buf)
+void __brelse(struct buffer_head *bh)
{
- if (atomic_read(&buf->b_count)) {
- put_bh(buf);
+ if (atomic_read(&bh->b_count)) {
+ put_bh(bh);
return;
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);
-/*
- * bforget() is like brelse(), except it discards any
- * potentially dirty data.
+/**
+ * __bforget - Discard any dirty data in a buffer.
+ * @bh: The buffer to forget.
+ *
+ * This variant of bforget() can be called if @bh is guaranteed to not
+ * be NULL.
*/
void __bforget(struct buffer_head *bh)
{
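
Illustrative sketch, not part of this patch, of the distinction the new kernel-doc draws: brelse() and bforget() tolerate a NULL pointer, while the double-underscore variants require a valid buffer_head.

static void example_release_buffers(struct buffer_head *maybe_null,
				    struct buffer_head *known_valid)
{
	brelse(maybe_null);	/* safe even if a lookup returned NULL */
	__brelse(known_valid);	/* caller guarantees @known_valid != NULL */
}
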
@@ -1415,6 +1412,11 @@ EXPORT_SYMBOL(__find_get_block);
* @size: The size of buffer_heads for this @bdev.
* @gfp: The memory allocation flags to use.
*
+ * The returned buffer head has its reference count incremented, but is
+ * not locked. The caller should call brelse() when it has finished
+ * with the buffer. The buffer may not be uptodate. If needed, the
+ * caller can bring it uptodate either by reading it or overwriting it.
+ *
* Return: The buffer head, or NULL if memory could not be allocated.
*/
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
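
Illustrative sketch, not part of this patch, of the usage pattern implied by the added paragraph: the buffer comes back referenced but possibly not uptodate, so the caller reads it before use and drops the reference with brelse(). The GFP flags here are one plausible choice, not a requirement.

static int example_get_and_read(struct block_device *bdev, sector_t block,
				unsigned size)
{
	struct buffer_head *bh;

	bh = bdev_getblk(bdev, block, size, GFP_NOFS | __GFP_MOVABLE);
	if (!bh)
		return -ENOMEM;
	if (!buffer_uptodate(bh) && bh_read(bh, 0) < 0) {
		brelse(bh);
		return -EIO;
	}
	/* ... use bh->b_data here ... */
	brelse(bh);
	return 0;
}
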
@@ -1446,24 +1448,33 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
EXPORT_SYMBOL(__breadahead);
/**
- * __bread_gfp() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
- * @gfp: page allocation flag
- *
- * Reads a specified block, and returns buffer head that contains it.
- * The page cache can be allocated from non-movable area
- * not to prevent page migration if you set gfp to zero.
- * It returns NULL if the block was unreadable.
+ * __bread_gfp() - Read a block.
+ * @bdev: The block device to read from.
+ * @block: Block number in units of block size.
+ * @size: The block size of this device in bytes.
+ * @gfp: Not page allocation flags; see below.
+ *
+ * You are not expected to call this function. You should use one of
+ * sb_bread(), sb_bread_unmovable() or __bread().
+ *
+ * Read a specified block, and return the buffer head that refers to it.
+ * If @gfp is 0, the memory will be allocated using the block device's
+ * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
+ * allocated from a movable area. Do not pass in a complete set of
+ * GFP flags.
+ *
+ * The returned buffer head has its refcount increased. The caller should
+ * call brelse() when it has finished with the buffer.
+ *
+ * Context: May sleep waiting for I/O.
+ * Return: NULL if the block was unreadable.
*/
-struct buffer_head *
-__bread_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
+struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
{
struct buffer_head *bh;
- gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
/*
* Prefer looping in the allocator rather than here, at least that
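
Illustrative sketch, not part of this patch, of the intended call path mentioned in the new kernel-doc: a filesystem goes through sb_bread() rather than calling __bread_gfp() directly. The block number 1 is arbitrary.

static int example_parse_super(struct super_block *sb)
{
	struct buffer_head *bh = sb_bread(sb, 1);	/* arbitrary block */

	if (!bh)
		return -EIO;
	/* ... parse on-disk metadata from bh->b_data ... */
	brelse(bh);
	return 0;
}
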
@@ -1632,6 +1643,7 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
if (length == folio_size(folio))
filemap_release_folio(folio, 0);
out:
+ folio_clear_mappedtodisk(folio);
return;
}
EXPORT_SYMBOL(block_invalidate_folio);
@@ -1696,16 +1708,16 @@ EXPORT_SYMBOL(create_empty_buffers);
*/
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
- struct inode *bd_inode = bdev->bd_inode;
- struct address_space *bd_mapping = bd_inode->i_mapping;
+ struct address_space *bd_mapping = bdev->bd_mapping;
+ const int blkbits = bd_mapping->host->i_blkbits;
struct folio_batch fbatch;
- pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
+ pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
pgoff_t end;
int i, count;
struct buffer_head *bh;
struct buffer_head *head;
- end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE;
+ end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
folio_batch_init(&fbatch);
while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
count = folio_batch_count(&fbatch);
@@ -1889,7 +1901,8 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
+ submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
+ inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
@@ -1936,7 +1949,6 @@ recover:
clear_buffer_dirty(bh);
}
} while ((bh = bh->b_this_page) != head);
- folio_set_error(folio);
BUG_ON(folio_test_writeback(folio));
mapping_set_error(folio->mapping, err);
folio_start_writeback(folio);
@@ -1944,7 +1956,8 @@ recover:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
+ submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
+ inode->i_write_hint, wbc);
nr_underway++;
}
bh = next;
@@ -2146,11 +2159,10 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
return err;
}
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page_folio(page), pos, len, get_block,
- NULL);
+ return __block_write_begin_int(folio, pos, len, get_block, NULL);
}
EXPORT_SYMBOL(__block_write_begin);
@@ -2162,6 +2174,8 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
struct buffer_head *bh, *head;
bh = head = folio_buffers(folio);
+ if (!bh)
+ return;
blocksize = bh->b_size;
block_start = 0;
@@ -2198,33 +2212,33 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
* The filesystem needs to handle block truncation upon failure.
*/
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, get_block_t *get_block)
+ struct folio **foliop, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
int status;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- status = __block_write_begin(page, pos, len, get_block);
+ status = __block_write_begin_int(folio, pos, len, get_block, NULL);
if (unlikely(status)) {
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
}
- *pagep = page;
+ *foliop = folio;
return status;
}
EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
size_t start = pos - folio_pos(folio);
if (unlikely(copied < len)) {
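
Illustrative sketch, not part of this patch: with block_write_begin() now handing back a folio, a minimal ->write_begin built on it looks roughly like this. example_get_block is the placeholder block mapper from the earlier sketch.

static int example_write_begin(struct file *file,
			       struct address_space *mapping,
			       loff_t pos, unsigned len,
			       struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop,
				 example_get_block);
}
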
@@ -2256,19 +2270,19 @@ EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
struct inode *inode = mapping->host;
loff_t old_size = inode->i_size;
bool i_size_changed = false;
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
/*
* No need to use i_size_read() here, the i_size cannot change under us
* because we hold i_rwsem.
*
- * But it's important to update i_size while still holding page lock:
+ * But it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
if (pos + copied > inode->i_size) {
@@ -2276,8 +2290,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
i_size_changed = true;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
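
Illustrative follow-up to the sketch above, not part of this patch: generic_write_end() can serve directly as the matching ->write_end, since it unlocks and releases the folio itself.

static const struct address_space_operations example_file_aops = {
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
};
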
@@ -2380,10 +2394,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (iblock < lblock) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
- if (err) {
- folio_set_error(folio);
+ if (err)
page_error = true;
- }
}
if (!buffer_mapped(bh)) {
folio_zero_range(folio, i * blocksize,
@@ -2445,7 +2457,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
int err;
@@ -2453,11 +2465,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
- err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
+ err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
if (err)
goto out;
- err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
+ err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
BUG_ON(err > 0);
out:
@@ -2471,7 +2483,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
- struct page *page;
+ struct folio *folio;
void *fsdata = NULL;
pgoff_t index, curidx;
loff_t curpos;
@@ -2490,12 +2502,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = PAGE_SIZE - zerofrom;
err = aops->write_begin(file, mapping, curpos, len,
- &page, &fsdata);
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2523,12 +2535,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = offset - zerofrom;
err = aops->write_begin(file, mapping, curpos, len,
- &page, &fsdata);
+ &folio, &fsdata);
if (err)
goto out;
- zero_user(page, zerofrom, len);
+ folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len,
- page, fsdata);
+ folio, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
@@ -2544,7 +2556,7 @@ out:
*/
int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata,
+ struct folio **foliop, void **fsdata,
get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
@@ -2562,7 +2574,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- return block_write_begin(mapping, pos, len, pagep, get_block);
+ return block_write_begin(mapping, pos, len, foliop, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
@@ -2756,6 +2768,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
}
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+ enum rw_hint write_hint,
struct writeback_control *wbc)
{
const enum req_op op = opf & REQ_OP_MASK;
@@ -2783,8 +2796,9 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_write_hint = write_hint;
- __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+ bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
@@ -2794,7 +2808,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
if (wbc) {
wbc_init_bio(wbc, bio);
- wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
+ wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
}
submit_bio(bio);
@@ -2802,7 +2816,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
void submit_bh(blk_opf_t opf, struct buffer_head *bh)
{
- submit_bh_wbc(opf, bh, NULL);
+ submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
}
EXPORT_SYMBOL(submit_bh);
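
Illustrative sketch, not part of this patch, of the classic synchronous pattern that funnels through submit_bh(): plain submit_bh() callers now implicitly pass WRITE_LIFE_NOT_SET, while the writeback path forwards the inode's i_write_hint.

static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by the end_io handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
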
@@ -2857,26 +2871,6 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
EXPORT_SYMBOL(sync_dirty_buffer);
-/*
- * try_to_free_buffers() checks if all the buffers on this particular folio
- * are unused, and releases them if so.
- *
- * Exclusion against try_to_free_buffers may be obtained by either
- * locking the folio or by holding its mapping's i_private_lock.
- *
- * If the folio is dirty but all the buffers are clean then we need to
- * be sure to mark the folio clean as well. This is because the folio
- * may be against a block device, and a later reattachment of buffers
- * to a dirty folio will set *all* buffers dirty. Which would corrupt
- * filesystem data on the same device.
- *
- * The same applies to regular filesystem folios: if all the buffers are
- * clean then we set the folio clean and proceed. To do that, we require
- * total exclusion from block_dirty_folio(). That is obtained with
- * i_private_lock.
- *
- * try_to_free_buffers() is non-blocking.
- */
static inline int buffer_busy(struct buffer_head *bh)
{
return atomic_read(&bh->b_count) |
@@ -2910,6 +2904,30 @@ failed:
return false;
}
+/**
+ * try_to_free_buffers - Release buffers attached to this folio.
+ * @folio: The folio.
+ *
+ * If any buffers are in use (dirty, under writeback, elevated refcount),
+ * no buffers will be freed.
+ *
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well. This is because the folio
+ * may be against a block device, and a later reattachment of buffers
+ * to a dirty folio will set *all* buffers dirty. Which would corrupt
+ * filesystem data on the same device.
+ *
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed. To do that, we require
+ * total exclusion from block_dirty_folio(). That is obtained with
+ * i_private_lock.
+ *
+ * Exclusion against try_to_free_buffers may be obtained by either
+ * locking the folio or by holding its mapping's i_private_lock.
+ *
+ * Context: Process context. @folio must be locked. Will not sleep.
+ * Return: true if all buffers attached to this folio were freed.
+ */
bool try_to_free_buffers(struct folio *folio)
{
struct address_space * const mapping = folio->mapping;
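
Illustrative sketch, not part of this patch: buffer_head-based filesystems typically reach try_to_free_buffers() from their ->release_folio hook, for example through a thin wrapper with the expected signature.

static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	/* gfp is unused here; freeing buffers does not allocate. */
	return try_to_free_buffers(folio);
}
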
@@ -3121,12 +3139,8 @@ void __init buffer_init(void)
unsigned long nrpages;
int ret;
- bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD),
- NULL);
-
+ bh_cachep = KMEM_CACHE(buffer_head,
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
*/
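
For reference, not part of this patch: KMEM_CACHE() derives the cache name, object size and alignment from the struct, so the new call is roughly equivalent to

	kmem_cache_create("buffer_head", sizeof(struct buffer_head),
			  __alignof__(struct buffer_head),
			  SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, NULL);

The dropped SLAB_MEM_SPREAD flag had already become a no-op, so the shorter spelling does not change behaviour.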