summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--	include/linux/fs.h	2
-rw-r--r--	mm/filemap.c	6
-rw-r--r--	mm/internal.h	3
-rw-r--r--	mm/readahead.c	21
4 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ef819b232d66..e14e9d11ca0f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1043,6 +1043,7 @@ struct fown_struct {
* and so were/are genuinely "ahead". Start next readahead when
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
*
@@ -1054,6 +1055,7 @@ struct file_ra_state {
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
+ unsigned short order;
unsigned short mmap_miss;
loff_t prev_pos;
};
diff --git a/mm/filemap.c b/mm/filemap.c
index 7bb4ffca8487..4b5c8d69f04c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3232,7 +3232,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
if (!(vm_flags & VM_RAND_READ))
ra->size *= 2;
ra->async_size = HPAGE_PMD_NR;
- page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
+ ra->order = HPAGE_PMD_ORDER;
+ page_cache_ra_order(&ractl, ra);
return fpin;
}
#endif
@@ -3268,8 +3269,9 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
ra->size = ra->ra_pages;
ra->async_size = ra->ra_pages / 4;
+ ra->order = 0;
ractl._index = ra->start;
- page_cache_ra_order(&ractl, ra, 0);
+ page_cache_ra_order(&ractl, ra);
return fpin;
}
diff --git a/mm/internal.h b/mm/internal.h
index 6b8ed2017743..f91688e2894f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -436,8 +436,7 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp);
-void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
- unsigned int order);
+void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
struct file *file, pgoff_t index, unsigned long nr_to_read)
diff --git a/mm/readahead.c b/mm/readahead.c
index 87be20ae00d0..95a24f12d1e7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -457,7 +457,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
}
void page_cache_ra_order(struct readahead_control *ractl,
- struct file_ra_state *ra, unsigned int new_order)
+ struct file_ra_state *ra)
{
struct address_space *mapping = ractl->mapping;
pgoff_t start = readahead_index(ractl);
@@ -468,9 +468,12 @@ void page_cache_ra_order(struct readahead_control *ractl,
unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
+ unsigned int new_order = ra->order;
- if (!mapping_large_folio_support(mapping))
+ if (!mapping_large_folio_support(mapping)) {
+ ra->order = 0;
goto fallback;
+ }
limit = min(limit, index + ra->size - 1);
@@ -478,6 +481,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
new_order = min_t(unsigned int, new_order, ilog2(ra->size));
new_order = max(new_order, min_order);
+ ra->order = new_order;
+
/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
@@ -609,8 +614,9 @@ void page_cache_sync_ra(struct readahead_control *ractl,
ra->size = min(contig_count + req_count, max_pages);
ra->async_size = 1;
readit:
+ ra->order = 0;
ractl->_index = ra->start;
- page_cache_ra_order(ractl, ra, 0);
+ page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
@@ -621,7 +627,6 @@ void page_cache_async_ra(struct readahead_control *ractl,
struct file_ra_state *ra = ractl->ra;
pgoff_t index = readahead_index(ractl);
pgoff_t expected, start, end, aligned_end, align;
- unsigned int order = folio_order(folio);
/* no readahead */
if (!ra->ra_pages)
@@ -644,7 +649,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
* Ramp up sizes, and push forward the readahead window.
*/
expected = round_down(ra->start + ra->size - ra->async_size,
- 1UL << order);
+ 1UL << folio_order(folio));
if (index == expected) {
ra->start += ra->size;
/*
@@ -673,15 +678,15 @@ void page_cache_async_ra(struct readahead_control *ractl,
ra->size += req_count;
ra->size = get_next_ra_size(ra, max_pages);
readit:
- order += 2;
- align = 1UL << min(order, ffs(max_pages) - 1);
+ ra->order += 2;
+ align = 1UL << min(ra->order, ffs(max_pages) - 1);
end = ra->start + ra->size;
aligned_end = round_down(end, align);
if (aligned_end > ra->start)
ra->size -= end - aligned_end;
ra->async_size = ra->size;
ractl->_index = ra->start;
- page_cache_ra_order(ractl, ra, order);
+ page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);