Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c     | 20
-rw-r--r--  mm/migrate.c        |  2
-rw-r--r--  mm/page-writeback.c | 13
-rw-r--r--  mm/page_alloc.c     | 55
-rw-r--r--  mm/vmscan.c         | 16
-rw-r--r--  mm/vmstat.c         |  3
6 files changed, 67 insertions(+), 42 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index e5995f38d677..cd93ea24c565 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1438,6 +1438,11 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
{
struct zone *zone;
struct zoneref *z;
+ pg_data_t *last_pgdat = NULL;
+
+ /* Do not retry compaction for zone-constrained allocations */
+ if (ac->high_zoneidx < ZONE_NORMAL)
+ return false;
/*
* Make sure at least one zone would pass __compaction_suitable if we continue
@@ -1448,14 +1453,27 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
unsigned long available;
enum compact_result compact_result;
+ if (last_pgdat == zone->zone_pgdat)
+ continue;
+
+ /*
+ * This over-estimates the number of pages available for
+ * reclaim/compaction but walking the LRU would take too
+ * long. The consequences are that compaction may retry
+ * longer than it should for a zone-constrained allocation
+ * request.
+ */
+ last_pgdat = zone->zone_pgdat;
+ available = pgdat_reclaimable_pages(zone->zone_pgdat) / order;
+
/*
* Do not consider all the reclaimable memory because we do not
* want to trash just for a single high order allocation which
* is even not guaranteed to appear even if __compaction_suitable
* is happy about the watermark check.
*/
- available = zone_reclaimable_pages(zone) / order;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
+ available = min(zone->managed_pages, available);
compact_result = __compaction_suitable(zone, order, alloc_flags,
ac_classzone_idx(ac), available);
if (compact_result != COMPACT_SKIPPED &&
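
The compaction.c hunk above boils down to a per-node availability estimate that is then clamped to the zone being checked. As a standalone reading aid, a minimal sketch of that calculation is below; the helper name compaction_retry_estimate is hypothetical and not part of the patch:

	static unsigned long compaction_retry_estimate(struct zone *zone, int order)
	{
		unsigned long available;

		/* Node-wide reclaimable pages, scaled down by the allocation order */
		available = pgdat_reclaimable_pages(zone->zone_pgdat) / order;

		/* Plus free pages already sitting in the zone being checked */
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

		/* The node-wide figure can exceed the zone, so clamp to its size */
		return min(zone->managed_pages, available);
	}
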
diff --git a/mm/migrate.c b/mm/migrate.c
index ed0268268e93..ed2f85e61de1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -513,9 +513,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
}
if (dirty && mapping_cap_account_dirty(mapping)) {
__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
- __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
- __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
}
}
local_irq_enable();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c02aa603f5a..0bca2376bd42 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,6 +299,9 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
return nr_pages;
}
+#ifdef CONFIG_HIGHMEM
+atomic_t highmem_file_pages;
+#endif
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
@@ -306,18 +309,17 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
int node;
unsigned long x = 0;
int i;
+ unsigned long dirtyable = atomic_read(&highmem_file_pages);
for_each_node_state(node, N_HIGH_MEMORY) {
for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
struct zone *z;
- unsigned long dirtyable;
if (!is_highmem_idx(i))
continue;
z = &NODE_DATA(node)->node_zones[i];
- dirtyable = zone_page_state(z, NR_FREE_PAGES) +
- zone_page_state(z, NR_ZONE_LRU_FILE);
+ dirtyable += zone_page_state(z, NR_FREE_PAGES);
/* watch for underflows */
dirtyable -= min(dirtyable, high_wmark_pages(z));
@@ -2460,7 +2462,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
__inc_node_page_state(page, NR_FILE_DIRTY);
- __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
__inc_node_page_state(page, NR_DIRTIED);
__inc_wb_stat(wb, WB_RECLAIMABLE);
__inc_wb_stat(wb, WB_DIRTIED);
@@ -2482,7 +2483,6 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
if (mapping_cap_account_dirty(mapping)) {
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
dec_node_page_state(page, NR_FILE_DIRTY);
- dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
task_io_account_cancelled_write(PAGE_SIZE);
}
@@ -2739,7 +2739,6 @@ int clear_page_dirty_for_io(struct page *page)
if (TestClearPageDirty(page)) {
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
dec_node_page_state(page, NR_FILE_DIRTY);
- dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
}
@@ -2786,7 +2785,6 @@ int test_clear_page_writeback(struct page *page)
if (ret) {
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
dec_node_page_state(page, NR_WRITEBACK);
- dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
inc_node_page_state(page, NR_WRITTEN);
}
unlock_page_memcg(page);
@@ -2841,7 +2839,6 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
if (!ret) {
mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
inc_node_page_state(page, NR_WRITEBACK);
- inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
}
unlock_page_memcg(page);
return ret;
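
The new highmem_file_pages counter replaces the per-zone NR_ZONE_LRU_FILE lookups that highmem_dirtyable_memory relied on above. This mm/-limited view does not show where the counter is maintained; one plausible shape for that hook is sketched below, with acct_highmem_file_pages and its call sites being assumptions rather than part of this diff:

	#ifdef CONFIG_HIGHMEM
	extern atomic_t highmem_file_pages;

	/*
	 * Called when file pages are added to (nr_pages > 0) or removed from
	 * (nr_pages < 0) an LRU list; only highmem zones feed the counter.
	 */
	static inline void acct_highmem_file_pages(int zid, int nr_pages)
	{
		if (is_highmem_idx(zid))
			atomic_add(nr_pages, &highmem_file_pages);
	}
	#else
	static inline void acct_highmem_file_pages(int zid, int nr_pages)
	{
	}
	#endif
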
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03e67f2dfdaa..f1b5a0bc11f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3402,6 +3402,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
{
struct zone *zone;
struct zoneref *z;
+ pg_data_t *current_pgdat = NULL;
/*
* Make sure we converge to OOM if we cannot make any progress
@@ -3411,27 +3412,56 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
return false;
/*
- * Keep reclaiming pages while there is a chance this will lead somewhere.
- * If none of the target zones can satisfy our allocation request even
- * if all reclaimable pages are considered then we are screwed and have
- * to go OOM.
+ * Blindly retry lowmem allocation requests that are often ignored by
+ * the OOM killer up to MAX_RECLAIM_RETRIES as we do not have a reliable
+ * and fast means of calculating reclaimable, dirty and writeback pages
+ * in eligible zones.
+ */
+ if (ac->high_zoneidx < ZONE_NORMAL)
+ goto out;
+
+ /*
+ * Keep reclaiming pages while there is a chance this will lead
+ * somewhere. If none of the target zones can satisfy our allocation
+ * request even if all reclaimable pages are considered then we are
+ * screwed and have to go OOM.
*/
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
unsigned long available;
unsigned long reclaimable;
+ int zid;
- available = reclaimable = zone_reclaimable_pages(zone);
+ if (current_pgdat == zone->zone_pgdat)
+ continue;
+
+ current_pgdat = zone->zone_pgdat;
+ available = reclaimable = pgdat_reclaimable_pages(current_pgdat);
available -= DIV_ROUND_UP(no_progress_loops * available,
MAX_RECLAIM_RETRIES);
- available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+ /* Account for all free pages on eligible zones */
+ for (zid = 0; zid <= zone_idx(zone); zid++) {
+ struct zone *acct_zone = &current_pgdat->node_zones[zid];
+
+ available += zone_page_state_snapshot(acct_zone, NR_FREE_PAGES);
+ }
/*
* Would the allocation succeed if we reclaimed the whole
- * available?
+ * available? This is approximate because there is no
+ * accurate count of reclaimable pages per zone.
*/
- if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
- ac_classzone_idx(ac), alloc_flags, available)) {
+ for (zid = 0; zid <= zone_idx(zone); zid++) {
+ struct zone *check_zone = &current_pgdat->node_zones[zid];
+ unsigned long estimate;
+
+ estimate = min(check_zone->managed_pages, available);
+ if (!__zone_watermark_ok(check_zone, order,
+ min_wmark_pages(check_zone), ac_classzone_idx(ac),
+ alloc_flags, estimate))
+ continue;
+
/*
* If we didn't make any progress and have a lot of
* dirty + writeback pages then we should wait for
@@ -3441,15 +3471,16 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
if (!did_some_progress) {
unsigned long write_pending;
- write_pending = zone_page_state_snapshot(zone,
- NR_ZONE_WRITE_PENDING);
+ write_pending =
+ node_page_state(current_pgdat, NR_WRITEBACK) +
+ node_page_state(current_pgdat, NR_FILE_DIRTY);
if (2 * write_pending > reclaimable) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
return true;
}
}
-
+out:
/*
* Memory allocation/reclaim might be called from a WQ
* context and the current implementation of the WQ
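
The estimate in should_reclaim_retry decays as no_progress_loops grows, so the watermark check becomes progressively harder to pass. A worked illustration, assuming MAX_RECLAIM_RETRIES is 16 as defined earlier in this file:

	/* Illustration only: how the retry estimate shrinks with failed loops */
	unsigned long reclaimable = 64000;	/* pgdat_reclaimable_pages() */
	unsigned long available = reclaimable;
	int no_progress_loops = 4;		/* 4 retries without progress */

	available -= DIV_ROUND_UP(no_progress_loops * available,
					MAX_RECLAIM_RETRIES);
	/*
	 * available is now 48000: a quarter of the estimate is written off
	 * before free pages are added and the watermark is re-checked.
	 */
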
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d5ee6d998b5e..5625eccc0140 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -194,22 +194,6 @@ static bool sane_reclaim(struct scan_control *sc)
}
#endif
-/*
- * This misses isolated pages which are not accounted for to save counters.
- * As the data only determines if reclaim or compaction continues, it is
- * not expected that isolated pages will be a dominating factor.
- */
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- unsigned long nr;
-
- nr = zone_page_state_snapshot(zone, NR_ZONE_LRU_FILE);
- if (get_nr_swap_pages() > 0)
- nr += zone_page_state_snapshot(zone, NR_ZONE_LRU_ANON);
-
- return nr;
-}
-
unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
{
unsigned long nr;
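
zone_reclaimable_pages goes away because the zone-level LRU counters it summed no longer exist; callers move to the node-level pgdat_reclaimable_pages, whose body is cut off by the hunk context above. A sketch of how the node-level helper would mirror the removed one follows; the exact node counters and snapshot helpers used here are assumptions, not shown by this diff:

	unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
	{
		unsigned long nr;

		/* File pages are always reclaimable */
		nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
		     node_page_state_snapshot(pgdat, NR_INACTIVE_FILE);

		/* Anonymous pages only count if there is swap to put them on */
		if (get_nr_swap_pages() > 0)
			nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
			      node_page_state_snapshot(pgdat, NR_INACTIVE_ANON);

		return nr;
	}
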
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ac509572a50b..91ecca96dcae 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,9 +921,6 @@ int fragmentation_index(struct zone *zone, unsigned int order)
const char * const vmstat_text[] = {
/* enum zone_stat_item countes */
"nr_free_pages",
- "nr_zone_anon_lru",
- "nr_zone_file_lru",
- "nr_zone_write_pending",
"nr_mlock",
"nr_slab_reclaimable",
"nr_slab_unreclaimable",