path: root/mm/page_alloc.c
author     Mel Gorman <mgorman@techsingularity.net>          2016-07-28 15:46:11 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 16:07:41 -0700
commit     281e37265f2826ed401d84d6790226448ef3f0e8 (patch)
tree       a29b375b754c242f29082cd9e0df1a48c8109ac2 /mm/page_alloc.c
parent     1e6b10857f91685c60c341703ece4ae9bb775cf3 (diff)
mm, page_alloc: consider dirtyable memory in terms of nodes
Historically dirty pages were spread among zones but now that LRUs are
per-node it is more appropriate to consider dirty pages in a node.

Link: http://lkml.kernel.org/r/1467970510-21195-17-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
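The page_alloc.c side of this change swaps zone_dirty_ok(zone) for node_dirty_ok(zone->zone_pgdat) in the zonelist walk, so an allocation for a page-cache write skips any zone whose backing node already holds more than its proportional share of the allowed dirty pages. Below is a standalone C sketch that models that per-node decision; the struct fields and the dirty-ratio arithmetic are simplified assumptions for illustration only, not the kernel's node_dirty_ok()/node_dirty_limit() code in mm/page-writeback.c.

/*
 * Hypothetical model of a per-node dirty-limit check.  Field names and the
 * ratio arithmetic are assumptions for illustration; the real logic lives
 * in node_dirty_ok()/node_dirty_limit() in mm/page-writeback.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct node_stats {
	unsigned long free_pages;	/* pages not yet allocated        */
	unsigned long file_pages;	/* page cache pages (dirtyable)   */
	unsigned long dirty_pages;	/* NR_FILE_DIRTY equivalent       */
	unsigned long writeback_pages;	/* NR_WRITEBACK equivalent        */
};

/* Proportional share of globally allowed dirty memory for one node. */
static unsigned long node_dirty_limit_model(const struct node_stats *n,
					    unsigned int dirty_ratio)
{
	unsigned long dirtyable = n->free_pages + n->file_pages;

	return dirtyable * dirty_ratio / 100;
}

/* Analogue of node_dirty_ok(): may this node take another dirty page? */
static bool node_dirty_ok_model(const struct node_stats *n,
				unsigned int dirty_ratio)
{
	unsigned long limit = node_dirty_limit_model(n, dirty_ratio);

	return n->dirty_pages + n->writeback_pages <= limit;
}

int main(void)
{
	struct node_stats node = {
		.free_pages = 100000,
		.file_pages = 50000,
		.dirty_pages = 20000,
		.writeback_pages = 5000,
	};

	/* With a 20% ratio this node may hold 30000 dirty pages. */
	printf("node_dirty_ok: %s\n",
	       node_dirty_ok_model(&node, 20) ? "yes" : "no");
	return 0;
}

As the XXX comment in the first hunk below notes, a failing check with spread_dirty_pages set only makes the allocator try the next zone in the zonelist; the limit may still be exceeded later in the slowpath on NUMA setups where the allowed nodes together cannot reach the global limit.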
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  26
1 file changed, 11 insertions, 15 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 749b3c358ead..73b018df6e42 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2912,31 +2912,24 @@ zonelist_scan:
}
/*
* When allocating a page cache page for writing, we
- * want to get it from a zone that is within its dirty
- * limit, such that no single zone holds more than its
+ * want to get it from a node that is within its dirty
+ * limit, such that no single node holds more than its
* proportional share of globally allowed dirty pages.
- * The dirty limits take into account the zone's
+ * The dirty limits take into account the node's
* lowmem reserves and high watermark so that kswapd
* should be able to balance it without having to
* write pages from its LRU list.
*
- * This may look like it could increase pressure on
- * lower zones by failing allocations in higher zones
- * before they are full. But the pages that do spill
- * over are limited as the lower zones are protected
- * by this very same mechanism. It should not become
- * a practical burden to them.
- *
* XXX: For now, allow allocations to potentially
- * exceed the per-zone dirty limit in the slowpath
+ * exceed the per-node dirty limit in the slowpath
* (spread_dirty_pages unset) before going into reclaim,
* which is important when on a NUMA setup the allowed
- * zones are together not big enough to reach the
+ * nodes are together not big enough to reach the
* global limit. The proper fix for these situations
- * will require awareness of zones in the
+ * will require awareness of nodes in the
* dirty-throttling and the flusher threads.
*/
- if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
+ if (ac->spread_dirty_pages && !node_dirty_ok(zone->zone_pgdat))
continue;
mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
@@ -6701,6 +6694,9 @@ static void calculate_totalreserve_pages(void)
enum zone_type i, j;
for_each_online_pgdat(pgdat) {
+
+ pgdat->totalreserve_pages = 0;
+
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
long max = 0;
@@ -6717,7 +6713,7 @@ static void calculate_totalreserve_pages(void)
if (max > zone->managed_pages)
max = zone->managed_pages;
- zone->totalreserve_pages = max;
+ pgdat->totalreserve_pages += max;
reserve_pages += max;
}