author    Xin Hao <vernhao@tencent.com>             2023-09-14 00:49:37 +0800
committer Andrew Morton <akpm@linux-foundation.org> 2023-10-04 10:32:27 -0700
commit    811244a501b967b00fecb1ae906d5dc6329c91e0 (patch)
tree      e95148a2d7edb118c193837f9c84d37f75a25c8b /mm/page_io.c
parent    ed547ab6f4041c2186df672029eb17f98f44d125 (diff)
mm: memcg: add THP swap out info for anonymous reclaim
At present we support a per-memcg reclaim strategy, but we do not know how many transparent huge pages are being reclaimed. Transparent huge pages must be split before they can be reclaimed, which can introduce a performance bottleneck. For example, when two memcgs (A and B) are reclaiming anonymous pages at the same time and memcg 'A' is reclaiming a large number of transparent huge pages, we can better determine that the bottleneck is caused by memcg 'A'. To make such problems easier to analyze, add THP swap-out info per memcg.

[akpm@linux-foundation.org: fix swap_writepage_fs(), per Johannes]
Link: https://lkml.kernel.org/r/20230913213343.GB48476@cmpxchg.org
Link: https://lkml.kernel.org/r/20230913164938.16918-1-vernhao@tencent.com
Signed-off-by: Xin Hao <vernhao@tencent.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
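The per-memcg count is recorded through count_memcg_folio_events(). That helper is not part of the mm/page_io.c diff below; as context, here is a minimal paraphrase of its definition in include/linux/memcontrol.h around this kernel version (treat the exact body as an assumption, not a quote from the tree):

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	/* Look up the memcg the folio is charged to; a folio with no
	 * memcg (e.g. uncharged) is simply skipped. */
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

This is what makes the new THP_SWPOUT count visible per cgroup (as thp_swpout in memory.stat, assuming the event is also added to memcg_vm_event_stat elsewhere in this patch).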
Diffstat (limited to 'mm/page_io.c')
-rw-r--r--  mm/page_io.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index fe4c21af23f2..cb559ae324c6 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 static inline void count_swpout_vm_event(struct folio *folio)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (unlikely(folio_test_pmd_mappable(folio)))
+	if (unlikely(folio_test_pmd_mappable(folio))) {
+		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
+	}
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
@@ -278,9 +280,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
 			set_page_dirty(page);
 			ClearPageReclaim(page);
 		}
-	} else {
-		for (p = 0; p < sio->pages; p++)
-			count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));
 	}
 
 	for (p = 0; p < sio->pages; p++)
@@ -296,6 +295,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
 	struct file *swap_file = sis->swap_file;
 	loff_t pos = page_file_offset(page);
 
+	count_swpout_vm_event(page_folio(page));
 	set_page_writeback(page);
 	unlock_page(page);
 	if (wbc->swap_plug)
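Net effect on the fs-backed swap path: THP/PSWPOUT accounting moves from the completion handler sio_write_complete(), where it ran per bvec page, to submission time in swap_writepage_fs(), where it runs once per folio while the page is still locked; that appears to be what the "[akpm@linux-foundation.org: fix swap_writepage_fs(), per Johannes]" fixup above addresses. For clarity, this is the helper as it reads with the patch applied, reconstructed directly from the first hunk:

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* A PMD-mapped folio swapped out as one unit: bump the global
	 * THP_SWPOUT event and, new with this patch, the same event
	 * on the folio's memcg. */
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	/* PSWPOUT is still counted per base page, as before. */
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}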