path: root/mm/vmscan.c
author    Shakeel Butt <shakeelb@google.com>    2019-11-30 17:50:16 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-12-01 06:29:18 -0800
commit    fa40d1ee9f156624658ca409a04a78882ca5b3c5 (patch)
tree      4f456e9480a8050070ee795e5dc4a34bc919095c /mm/vmscan.c
parent    242c37b459ce9ea1be53b75bdb76a7d9268a0791 (diff)
mm: vmscan: memcontrol: remove mem_cgroup_select_victim_node()
Since commit 1ba6fc9af35b ("mm: vmscan: do not share cgroup iteration between reclaimers"), the memcg reclaim does not bail out earlier based on sc->nr_reclaimed and will traverse all the nodes. All the reclaimable pages of the memcg on all the nodes will be scanned relative to the reclaim priority. So, there is no need to maintain state regarding which node to start the memcg reclaim from.

This patch effectively reverts the commit 889976dbcb12 ("memcg: reclaim memory from nodes in round-robin order") and commit 453a9bf347f1 ("memcg: fix numa scan information update to be triggered by memory event").

[shakeelb@google.com: v2]
  Link: http://lkml.kernel.org/r/20191030204232.139424-1-shakeelb@google.com
Link: http://lkml.kernel.org/r/20191029234753.224143-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
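In effect, the node-selection logic in try_to_free_mem_cgroup_pages() collapses to a single line, as the diff below shows. A simplified before/after sketch of just that step (a kernel-context excerpt, not a standalone program; the scan_control setup and the rest of the function are elided):

    /* Before: pick a per-memcg victim node in round-robin order and
     * start reclaim from that node's fallback zonelist. */
    int nid = mem_cgroup_select_victim_node(memcg);
    struct zonelist *zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];

    /* After: always start from the current node's ZONELIST_FALLBACK
     * zonelist; because memcg reclaim no longer bails out early, every
     * node on that zonelist ends up under roughly equal pressure. */
    struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);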
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--    mm/vmscan.c    14
1 file changed, 5 insertions, 9 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ee4eecc7e1c2..2beff0e0dc7b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3348,10 +3348,8 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                            gfp_t gfp_mask,
                                            bool may_swap)
 {
-        struct zonelist *zonelist;
         unsigned long nr_reclaimed;
         unsigned long pflags;
-        int nid;
         unsigned int noreclaim_flag;
         struct scan_control sc = {
                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
@@ -3364,16 +3362,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                 .may_unmap = 1,
                 .may_swap = may_swap,
         };
-
-        set_task_reclaim_state(current, &sc.reclaim_state);
         /*
-         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
-         * take care of from where we get pages. So the node where we start the
-         * scan does not need to be the current node.
+         * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
+         * equal pressure on all the nodes. This is based on the assumption that
+         * the reclaim does not bail out early.
         */
-        nid = mem_cgroup_select_victim_node(memcg);
+        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);

-        zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+        set_task_reclaim_state(current, &sc.reclaim_state);

         trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);