Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/inode.c        2
-rw-r--r--  fs/btrfs/reflink.c      4
-rw-r--r--  fs/buffer.c             3
-rw-r--r--  fs/ceph/file.c          2
-rw-r--r--  fs/cifs/cifssmb.c       2
-rw-r--r--  fs/cifs/inode.c         2
-rw-r--r--  fs/crypto/crypto.c      2
-rw-r--r--  fs/ext4/file.c          2
-rw-r--r--  fs/ext4/inode.c         2
-rw-r--r--  fs/ext4/readpage.c      2
-rw-r--r--  fs/f2fs/checkpoint.c    2
-rw-r--r--  fs/f2fs/data.c          6
-rw-r--r--  fs/f2fs/file.c          2
-rw-r--r--  fs/f2fs/node.c          4
-rw-r--r--  fs/fuse/fuse_i.h        2
-rw-r--r--  fs/iomap/buffered-io.c  9
-rw-r--r--  fs/nfs/file.c           2
-rw-r--r--  fs/ntfs/aops.c          2
-rw-r--r--  fs/verity/verify.c      4
19 files changed, 27 insertions(+), 29 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index aa0a60ee26cb..6bfc4343c98d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8296,7 +8296,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* cover the full folio, like invalidating the last folio, we're
* still safe to wait for ordered extent to finish.
*/
- if (!(offset == 0 && length == PAGE_SIZE)) {
+ if (!(offset == 0 && length == folio_size(folio))) {
btrfs_releasepage(&folio->page, GFP_NOFS);
return;
}
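With large folios a single folio can span several pages, so comparing the invalidated length against PAGE_SIZE no longer identifies a whole-folio invalidation; folio_size() gives the folio's true byte size. A minimal sketch of the check's intent (the helper name is hypothetical, not part of this patch):

static inline bool invalidates_whole_folio(struct folio *folio,
					   size_t offset, size_t length)
{
	/* true only when the invalidation covers every byte of the folio */
	return offset == 0 && length == folio_size(folio);
}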
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 04a88bfe4fcf..998e3f180d90 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
int ret;
/*
- * Lock destination range to serialize with concurrent readpages() and
+ * Lock destination range to serialize with concurrent readahead() and
* source range to serialize with relocation.
*/
btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
@@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
}
/*
- * Lock destination range to serialize with concurrent readpages() and
+ * Lock destination range to serialize with concurrent readahead() and
* source range to serialize with relocation.
*/
btrfs_double_extent_lock(src, off, inode, destoff, len);
diff --git a/fs/buffer.c b/fs/buffer.c
index d67fbe063a3a..2b5561ae5d0b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
- err = pagecache_write_begin(NULL, mapping, size, 0,
- AOP_FLAG_CONT_EXPAND, &page, &fsdata);
+ err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
if (err)
goto out;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index feb75eb1cd82..6c9e837aa1d3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1869,7 +1869,7 @@ retry_snap:
* are pending vmtruncate. So write and vmtruncate
* can not run at the same time
*/
- written = generic_perform_write(file, from, pos);
+ written = generic_perform_write(iocb, from);
if (likely(written >= 0))
iocb->ki_pos = pos + written;
ceph_end_io_write(inode);
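This call site (and the ext4, f2fs and nfs ones below) reflects the new generic_perform_write() prototype, which reads the write position from iocb->ki_pos instead of taking a separate pos argument. A hedged sketch of a typical caller under that assumption (illustrative, not taken from this patch):

static ssize_t example_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t written;

	current->backing_dev_info = inode_to_bdi(inode);
	written = generic_perform_write(iocb, from);	/* two-argument form */
	current->backing_dev_info = NULL;
	if (written > 0)
		iocb->ki_pos += written;	/* callers still advance ki_pos */
	return written;
}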
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 071e2f21a7db..bc3ded4f34f6 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -597,7 +597,7 @@ CIFSSMBNegotiate(const unsigned int xid,
set_credits(server, server->maxReq);
/* probably no need to store and check maxvcs */
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
- /* set up max_read for readpages check */
+ /* set up max_read for readahead check */
server->max_read = server->maxBuf;
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 60d853c92f6a..2f9e7d2f81b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode)
inode->i_fop = &cifs_file_ops;
}
- /* check if server can support readpages */
+ /* check if server can support readahead */
if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
PAGE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 4fcca79f39ae..526a4c1bed99 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
* which must still be locked and not uptodate. Normally, blocksize ==
* PAGE_SIZE and the whole page is decrypted at once.
*
- * This is for use by the filesystem's ->readpages() method.
+ * This is for use by the filesystem's ->readahead() method.
*
* Return: 0 on success; -errno on failure
*/
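The kerneldoc above belongs to fscrypt_decrypt_pagecache_blocks(). A hedged sketch of how a bio-based ->readahead() completion path might use it, modelled on the fs/crypto bio helper (the function here is illustrative, not part of this patch):

static void example_decrypt_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/* decrypt each pagecache page covered by the completed read bio */
	bio_for_each_segment_all(bv, bio, iter_all) {
		if (fscrypt_decrypt_pagecache_blocks(bv->bv_page, bv->bv_len,
						     bv->bv_offset))
			SetPageError(bv->bv_page);
	}
}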
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8bd66cdc41be..6feb07e3e1eb 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
goto out;
current->backing_dev_info = inode_to_bdi(inode);
- ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+ ret = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
out:
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1ce13f69fbec..13740f2d0e61 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3589,7 +3589,7 @@ const struct iomap_ops ext4_iomap_report_ops = {
static bool ext4_journalled_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
- WARN_ON_ONCE(!page_has_buffers(&folio->page));
+ WARN_ON_ONCE(!folio_buffers(folio));
folio_set_checked(folio);
return filemap_dirty_folio(mapping, folio);
}
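folio_buffers() is the folio-native counterpart of page_has_buffers(): it returns the buffer_head ring attached to the folio's private data, or NULL when no buffers are attached, so it works both as a predicate and as an accessor. A hedged usage sketch (hypothetical helper, not from this patch):

static bool example_folio_buffers_uptodate(struct folio *folio)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;

	if (!head)
		return false;	/* no buffers attached to this folio */
	do {
		if (!buffer_uptodate(bh))
			return false;
	} while ((bh = bh->b_this_page) != head);
	return true;
}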
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 1aa26d6634fc..af491e170c4a 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work)
struct bio *bio = ctx->bio;
/*
- * fsverity_verify_bio() may call readpages() again, and although verity
+ * fsverity_verify_bio() may call readahead() again, and although verity
* will be disabled for that, decryption may still be needed, causing
* another bio_post_read_ctx to be allocated. So to guarantee that
* mempool_alloc() never deadlocks we must free the current ctx first.
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a8fc4fa511a8..f5366feea82d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -456,7 +456,7 @@ static bool f2fs_dirty_meta_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
- inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META);
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
set_page_private_reference(&folio->page);
return true;
}
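F2FS_M_SB(mapping) reaches the f2fs superblock info through the address_space's host inode, which the dirty hook already has in hand, rather than going back through the page. Roughly, the relationship between the two accessors looks like this (paraphrased sketch of fs/f2fs/f2fs.h, shown for context only):

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);		/* via the owning inode */
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));	/* via page->mapping */
}

The same substitution appears in the fs/f2fs/node.c hunks below.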
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f8fcbe91059b..8e0c2e773c8d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work)
bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
/*
- * fsverity_verify_bio() may call readpages() again, and while verity
+ * fsverity_verify_bio() may call readahead() again, and while verity
* will be disabled for this, decryption and/or decompression may still
* be needed, resulting in another bio_post_read_ctx being allocated.
* So to prevent deadlocks we need to release the current ctx to the
@@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac)
if (!f2fs_is_compress_backend_ready(inode))
return;
- /* If the file has inline data, skip readpages */
+ /* If the file has inline data, skip readahead */
if (f2fs_has_inline_data(inode))
return;
@@ -3571,7 +3571,7 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
f2fs_update_dirty_folio(inode, folio);
return true;
}
- return true;
+ return false;
}
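The last hunk matters because ->dirty_folio() is expected to report whether this call newly dirtied the folio; returning true for an already-dirty folio misleads the caller's dirty accounting. A minimal sketch of the contract the fix restores (hypothetical filesystem, following the pattern above):

static bool example_dirty_folio(struct address_space *mapping,
				struct folio *folio)
{
	if (!folio_test_dirty(folio)) {
		filemap_dirty_folio(mapping, folio);
		/* per-filesystem dirty accounting would go here */
		return true;	/* this call made the folio dirty */
	}
	return false;		/* it was already dirty */
}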
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d3f39a704b8b..5b89af0f27f0 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
return -EOPNOTSUPP;
current->backing_dev_info = inode_to_bdi(inode);
- ret = generic_perform_write(file, from, iocb->ki_pos);
+ ret = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
if (ret > 0) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 0b6e741e94a0..c45d341dcf6e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2146,11 +2146,11 @@ static bool f2fs_dirty_node_folio(struct address_space *mapping,
folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
if (IS_INODE(&folio->page))
- f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page);
+ f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
- inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES);
+ inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
set_page_private_reference(&folio->page);
return true;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index eac4984cc753..488b460e046f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -627,7 +627,7 @@ struct fuse_conn {
/** Connection successful. Only set in INIT */
unsigned conn_init:1;
- /** Do readpages asynchronously? Only set in INIT */
+ /** Do readahead asynchronously? Only set in INIT */
unsigned async_read:1;
/** Return an unique read error after abort. Only set in INIT */
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 49dccd9050f1..8ce8720093b9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -435,18 +435,17 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = folio->mapping->host;
- size_t len;
unsigned first, last, i;
if (!iop)
return false;
- /* Limit range to this folio */
- len = min(folio_size(folio) - from, count);
+ /* Caller's range may extend past the end of this folio */
+ count = min(folio_size(folio) - from, count);
- /* First and last blocks in range within page */
+ /* First and last blocks in range within folio */
first = from >> inode->i_blkbits;
- last = (from + len - 1) >> inode->i_blkbits;
+ last = (from + count - 1) >> inode->i_blkbits;
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))
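A worked example of the clamping above, with assumed numbers (a 4 KiB folio and 1 KiB blocks, so i_blkbits = 10):

/*
 * from = 1536, count = 8192 (caller's range runs past this folio)
 *   count = min(4096 - 1536, 8192)  = 2560
 *   first = 1536 >> 10              = 1
 *   last  = (1536 + 2560 - 1) >> 10 = 4095 >> 10 = 3
 * so blocks 1..3 of the folio must all be uptodate for the range
 * to be reported as uptodate.
 */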
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index b0ca244c50d0..150b7fa8f0a7 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
result = generic_write_checks(iocb, from);
if (result > 0) {
current->backing_dev_info = inode_to_bdi(inode);
- result = generic_perform_write(file, from, iocb->ki_pos);
+ result = generic_perform_write(iocb, from);
current->backing_dev_info = NULL;
}
nfs_end_io_write(inode);
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index d154dcfe06af..90e3dad8ee45 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
spin_unlock(&mapping->private_lock);
- block_dirty_folio(mapping, page_folio(page));
+ filemap_dirty_folio(mapping, page_folio(page));
if (unlikely(buffers_to_free)) {
do {
bh = buffers_to_free->b_this_page;
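A note on the intent, inferred from the visible context rather than the commit message: the loop above has already dirtied exactly the buffers backing the NTFS record under private_lock, so the folio itself should be dirtied without touching buffers again. filemap_dirty_folio() is the buffer-agnostic dirtier (the folio counterpart of __set_page_dirty_nobuffers()), whereas block_dirty_folio() would mark every buffer on the folio dirty.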
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 0adb970f4e73..14e2fb49cff5 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Data verification functions, i.e. hooks for ->readpages()
+ * Data verification functions, i.e. hooks for ->readahead()
*
* Copyright 2019 Google LLC
*/
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page);
* that fail verification are set to the Error state. Verification is skipped
* for pages already in the Error state, e.g. due to fscrypt decryption failure.
*
- * This is a helper function for use by the ->readpages() method of filesystems
+ * This is a helper function for use by the ->readahead() method of filesystems
* that issue bios to read data directly into the page cache. Filesystems that
* populate the page cache without issuing bios (e.g. non block-based
* filesystems) must instead call fsverity_verify_page() directly on each page.
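A hedged sketch of the calling pattern this paragraph describes, for a bio-based read path; the function is illustrative and, as the ext4 and f2fs hunks above suggest, would normally run from a workqueue because verification may sleep:

static void example_verity_work(struct bio *bio)
{
	struct inode *inode = bio_first_page_all(bio)->mapping->host;

	if (fsverity_active(inode))
		fsverity_verify_bio(bio);	/* failing pages are set to the Error state */

	/* ... then mark the surviving pages uptodate and unlock them ... */
}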