 mm/vmscan.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0294ab34f475..650d26832569 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1652,6 +1652,30 @@ static int current_may_throttle(void)
 		bdi_write_congested(current->backing_dev_info);
 }
 
+static bool inactive_reclaimable_pages(struct lruvec *lruvec,
+				struct scan_control *sc, enum lru_list lru)
+{
+	int zid;
+	struct zone *zone;
+	int file = is_file_lru(lru);
+	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+	if (!global_reclaim(sc))
+		return true;
+
+	for (zid = sc->reclaim_idx; zid >= 0; zid--) {
+		zone = &pgdat->node_zones[zid];
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
+				LRU_FILE * file) >= SWAP_CLUSTER_MAX)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
  * of reclaimed pages
@@ -1674,6 +1698,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
+	if (!inactive_reclaimable_pages(lruvec, sc, lru))
+		return 0;
+
 	while (unlikely(too_many_isolated(pgdat, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
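
What the new check buys, in brief: reclaim decisions are made per node, but an
allocation may only be able to use zones up to sc->reclaim_idx. If every page
on the target inactive LRU sits in a higher, ineligible zone,
shrink_inactive_list() would scan (and potentially throttle) without being
able to reclaim anything usable. The helper walks only the eligible zones and
lets the scan proceed if at least one of them holds a full reclaim batch
(SWAP_CLUSTER_MAX pages).

Below is a minimal userspace sketch of that gating logic. It is illustrative
only: struct zone_model, has_reclaimable_pages() and the sample counts are
hypothetical stand-ins, not kernel code; SWAP_CLUSTER_MAX is 32 in the kernel.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES		4	/* e.g. DMA, DMA32, NORMAL, MOVABLE */
#define SWAP_CLUSTER_MAX	32	/* one reclaim batch, as in the kernel */

struct zone_model {
	bool populated;
	unsigned long nr_inactive;	/* pages on the target inactive LRU */
};

/*
 * Model of inactive_reclaimable_pages(): scan only zones at or below
 * reclaim_idx and report whether any of them holds a full batch.
 */
static bool has_reclaimable_pages(const struct zone_model *zones,
				  int reclaim_idx)
{
	int zid;

	for (zid = reclaim_idx; zid >= 0; zid--) {
		if (!zones[zid].populated)
			continue;
		if (zones[zid].nr_inactive >= SWAP_CLUSTER_MAX)
			return true;
	}
	return false;
}

int main(void)
{
	/* Nearly all inactive pages sit in the highest zone (index 3). */
	struct zone_model zones[MAX_NR_ZONES] = {
		{ true, 10 }, { true, 0 }, { true, 5 }, { true, 1000 },
	};

	/* A lowmem-constrained request sees only zones 0..1: no eligible
	 * zone holds a full batch, so reclaim would bail out early. */
	printf("reclaim_idx=1: %s\n",
	       has_reclaimable_pages(zones, 1) ? "scan" : "bail out");

	/* An unconstrained request sees zone 3 and proceeds. */
	printf("reclaim_idx=3: %s\n",
	       has_reclaimable_pages(zones, 3) ? "scan" : "bail out");

	return 0;
}

Note the early return for !global_reclaim(sc) in the real patch: memcg
(non-global) reclaim is not constrained by zone eligibility in this way, so
the bail-out applies only to global direct reclaim and kswapd.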