author    Matthew Wilcox (Oracle) <willy@infradead.org>    2022-03-23 21:29:04 -0400
committer Matthew Wilcox (Oracle) <willy@infradead.org>    2022-04-01 13:45:33 -0400
commit    704528d895dd3e7b173e672116b4eb2b0a0fceb0 (patch)
tree      2fd71b29ddd4e61c18b5114f94bd5433bbbda00d /mm
parent    ebf921a9fac38560e0fc3a4381e163a6969efd5a (diff)
fs: Remove ->readpages address space operation
All filesystems have now been converted to use ->readahead, so remove
the ->readpages operation and fix all the comments that used to refer
to it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
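For context, a minimal sketch of what a converted filesystem looks like
after this change. The "myfs" names are hypothetical; struct
readahead_control, readahead_page(), put_page() and the two address
space operations are the real API of this kernel version. It mirrors
the ->readpage fallback loop in read_pages() shown in the diff below,
so treat it as a sketch rather than any particular filesystem's code:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical single-page reader; unlocks the page when the read completes. */
static int myfs_readpage(struct file *file, struct page *page);

static void myfs_readahead(struct readahead_control *rac)
{
	struct page *page;

	/*
	 * readahead_page() hands out each page in turn, locked and with
	 * a reference held; the read completion path unlocks it, and we
	 * drop our reference here.
	 */
	while ((page = readahead_page(rac))) {
		myfs_readpage(rac->file, page);
		put_page(page);
	}
}

static const struct address_space_operations myfs_aops = {
	.readpage  = myfs_readpage,
	.readahead = myfs_readahead,	/* replaces the removed ->readpages */
};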
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    |  2 +-
-rw-r--r--  mm/readahead.c  | 15 ++-------------
2 files changed, 3 insertions(+), 14 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 647d72bf23b6..d904cd7e4181 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file,
* the page cache as the locked folio would then be enough to
* synchronize with hole punching. But there are code paths
* such as filemap_update_page() filling in partially uptodate
- * pages or ->readpages() that need to hold invalidate_lock
+ * pages or ->readahead() that need to hold invalidate_lock
* while mapping blocks for IO so let's hold the lock here as
* well to keep locking rules simple.
*/
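The comment fixed above describes the locking rule this hunk is about:
paths that fill the page cache take mapping->invalidate_lock shared so
they cannot race with hole punching, which takes it exclusive. A
simplified sketch of the pattern, not verbatim kernel code (the read
step is illustrative):

/*
 * Fillers take the lock shared; truncate and hole punch take it
 * exclusive.  Holding it across both the page cache insertion and
 * the block mapping keeps the two from racing.
 */
filemap_invalidate_lock_shared(mapping);
error = filemap_add_folio(mapping, folio, index, gfp);
if (!error)
	error = mapping->a_ops->readpage(file, &folio->page);
filemap_invalidate_unlock_shared(mapping);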
diff --git a/mm/readahead.c b/mm/readahead.c
index 9097af639beb..297bd0719cda 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -170,13 +170,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
unlock_page(page);
put_page(page);
}
- } else if (aops->readpages) {
- aops->readpages(rac->file, rac->mapping, pages,
- readahead_count(rac));
- /* Clean up the remaining pages */
- put_pages_list(pages);
- rac->_index += rac->_nr_pages;
- rac->_nr_pages = 0;
} else {
while ((page = readahead_page(rac))) {
aops->readpage(rac->file, page);
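The deleted branch above shows the old contract: the filesystem got the
pages on a list, had to insert each one into the page cache itself, and
anything it left on the list was freed by put_pages_list(). A hedged
sketch of what an old-style implementation looked like ("myfs" names
hypothetical, modeled on the common mpage-style loop):

static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct page *page;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		/* Each filesystem had to add the page to the page cache itself. */
		if (!add_to_page_cache_lru(page, mapping, page->index,
					   readahead_gfp_mask(mapping)))
			myfs_readpage(file, page); /* hypothetical; unlocks on completion */
		put_page(page);
	}
	/* Pages never taken off the list were freed by put_pages_list(). */
	return 0;
}

With ->readahead, the core adds every page to the page cache up front,
which is why the list bookkeeping in the next hunk can go away too.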
@@ -253,10 +246,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
folio = filemap_alloc_folio(gfp_mask, 0);
if (!folio)
break;
- if (mapping->a_ops->readpages) {
- folio->index = index + i;
- list_add(&folio->lru, &page_pool);
- } else if (filemap_add_folio(mapping, folio, index + i,
+ if (filemap_add_folio(mapping, folio, index + i,
gfp_mask) < 0) {
folio_put(folio);
read_pages(ractl, &page_pool, true);
@@ -318,8 +308,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 unsigned long max_pages, index;

- if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
- !mapping->a_ops->readahead))
+ if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
 return;

 /*