Diffstat (limited to 'mm/readahead.c')
-rw-r--r--  mm/readahead.c  |  8 ++++++++
1 file changed, 8 insertions, 0 deletions
diff --git a/mm/readahead.c b/mm/readahead.c
index 406756d34309..3a4b5d58eeb6 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -129,6 +129,9 @@
 #include <linux/fadvise.h>
 #include <linux/sched/mm.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/readahead.h>
+
 #include "internal.h"
 
 /*
@@ -225,6 +228,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	 */
 	unsigned int nofs = memalloc_nofs_save();
 
+	trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,
+				      lookahead_size);
 	filemap_invalidate_lock_shared(mapping);
 	index = mapping_align_index(mapping, index);
 
@@ -470,6 +475,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	gfp_t gfp = readahead_gfp_mask(mapping);
 	unsigned int new_order = ra->order;
 
+	trace_page_cache_ra_order(mapping->host, start, ra);
 	if (!mapping_large_folio_support(mapping)) {
 		ra->order = 0;
 		goto fallback;
@@ -554,6 +560,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	unsigned long max_pages, contig_count;
 	pgoff_t prev_index, miss;
 
+	trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count);
 	/*
 	 * Even if readahead is disabled, issue this request as readahead
 	 * as we'll need it to satisfy the requested range. The forced
@@ -638,6 +645,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
 	if (folio_test_writeback(folio))
 		return;
 
+	trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count);
 	folio_clear_readahead(folio);
 
 	if (blk_cgroup_congested())
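
Note: this diff only instruments mm/readahead.c; the event definitions themselves live in the new header pulled in under CREATE_TRACE_POINTS, which is not part of the hunks shown here. The following is only a hedged sketch of what one of the four events could look like, reconstructed from the call sites above; the TRACE_SYSTEM name, field selection, and print format are assumptions, not the actual contents of include/trace/events/readahead.h.

/*
 * Hypothetical sketch of include/trace/events/readahead.h.
 * Field names and the printk format are assumptions made for
 * illustration; only the call-site prototype is taken from the patch.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM readahead

#if !defined(_TRACE_READAHEAD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_READAHEAD_H

#include <linux/tracepoint.h>
#include <linux/fs.h>

TRACE_EVENT(page_cache_ra_unbounded,
	/* Prototype matches the call in page_cache_ra_unbounded() above. */
	TP_PROTO(struct inode *inode, pgoff_t index,
		 unsigned long nr_to_read, unsigned long lookahead_size),

	TP_ARGS(inode, index, nr_to_read, lookahead_size),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(ino_t,		ino)
		__field(pgoff_t,	index)
		__field(unsigned long,	nr_to_read)
		__field(unsigned long,	lookahead_size)
	),

	TP_fast_assign(
		/* Identify the file by device and inode number. */
		__entry->dev		= inode->i_sb->s_dev;
		__entry->ino		= inode->i_ino;
		__entry->index		= index;
		__entry->nr_to_read	= nr_to_read;
		__entry->lookahead_size	= lookahead_size;
	),

	TP_printk("dev %d:%d ino %lx index %lu nr_to_read %lu lookahead_size %lu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino,
		  (unsigned long)__entry->index,
		  __entry->nr_to_read, __entry->lookahead_size)
);

#endif /* _TRACE_READAHEAD_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Assuming the events really are grouped under a "readahead" system (again an assumption based on the header name), they could be enabled at runtime through the standard tracefs interface, e.g. "echo 1 > /sys/kernel/tracing/events/readahead/enable", with the resulting records read back from /sys/kernel/tracing/trace.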