Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  81
1 file changed, 47 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bee53495a829..4390a8d5be41 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -442,16 +442,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
return 0;
- if (!down_read_trylock(&shrinker_rwsem)) {
- /*
- * If we would return 0, our callers would understand that we
- * have nothing else to shrink and give up trying. By returning
- * 1 we keep it going and assume we'll be able to shrink next
- * time.
- */
- freed = 1;
+ if (!down_read_trylock(&shrinker_rwsem))
goto out;
- }
list_for_each_entry(shrinker, &shrinker_list, list) {
struct shrink_control sc = {
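The deleted comment documented a return-value convention: callers read 0 from shrink_slab() as "nothing left to shrink" and stop retrying, which is why the old code faked progress by returning 1 when shrinker_rwsem was contended. A minimal sketch of that caller pattern (the loop and helper names are assumptions, not taken from this diff):

	/* Hypothetical caller: keeps shrinking while progress is
	 * reported; a 0 return ends the retry loop, so a contended
	 * trylock used to return 1 to avoid a premature give-up. */
	unsigned long freed;
	do {
		freed = shrink_slab(gfp_mask, nid, memcg, priority);
	} while (freed && reclaim_should_continue());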
@@ -1780,6 +1772,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
/*
+ * If dirty pages are scanned that are not queued for IO, it
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty pages to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty pages grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep.
+ */
+ if (stat.nr_unqueued_dirty == nr_taken)
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
+
+ /*
* Legacy memcg will stall in page writeback so avoid forcibly
* stalling here.
*/
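For context, the stat.nr_unqueued_dirty tally that drives this heuristic is accumulated while the taken pages are processed; a sketch of the accounting (assumed, modelled on shrink_page_list(), not part of this diff):

	/* Sketch: a scanned page that is dirty with no writeback in
	 * flight counts as "unqueued dirty"; when every page taken off
	 * the LRU is in that state (nr_unqueued_dirty == nr_taken),
	 * the flusher threads are clearly behind. */
	if (PageDirty(page)) {
		stat->nr_dirty++;
		if (!PageWriteback(page))
			stat->nr_unqueued_dirty++;
	}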
@@ -1791,22 +1797,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
set_bit(PGDAT_CONGESTED, &pgdat->flags);
- /*
- * If dirty pages are scanned that are not queued for IO, it
- * implies that flushers are not doing their job. This can
- * happen when memory pressure pushes dirty pages to the end of
- * the LRU before the dirty limits are breached and the dirty
- * data has expired. It can also happen when the proportion of
- * dirty pages grows not through writes but through memory
- * pressure reclaiming all the clean cache. And in some cases,
- * the flushers simply cannot keep up with the allocation
- * rate. Nudge the flusher threads in case they are asleep, but
- * also allow kswapd to start writing pages during reclaim.
- */
- if (stat.nr_unqueued_dirty == nr_taken) {
- wakeup_flusher_threads(WB_REASON_VMSCAN);
+ /* Allow kswapd to start writing pages during reclaim. */
+ if (stat.nr_unqueued_dirty == nr_taken)
set_bit(PGDAT_DIRTY, &pgdat->flags);
- }
/*
* If kswapd scans pages marked for immediate
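The PGDAT_DIRTY bit set here is consumed later in the scan: it is what permits kswapd to issue writeback from reclaim at all. A sketch of that consumer (assumed, modelled on the existing shrink_page_list() check):

	/* Sketch: dirty file pages are normally left to the flusher
	 * threads; only kswapd running against a pgdat flagged
	 * PGDAT_DIRTY may write pages back itself. */
	if (page_is_file_cache(page) &&
	    (!current_is_kswapd() ||
	     !test_bit(PGDAT_DIRTY, &pgdat->flags)))
		goto activate_locked;	/* keep the page, skip writeback */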
@@ -3546,16 +3539,21 @@ kswapd_try_sleep:
}
/*
- * A zone is low on free memory, so wake its kswapd task to service it.
+ * A zone is low on free memory or too fragmented for high-order memory. If
+ * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
+ * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
+ * has failed or is not needed, still wake up kcompactd if only compaction is
+ * needed.
*/
-void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
+ enum zone_type classzone_idx)
{
pg_data_t *pgdat;
if (!managed_zone(zone))
return;
- if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
+ if (!cpuset_zone_allowed(zone, gfp_flags))
return;
pgdat = zone->zone_pgdat;
pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat,
@@ -3564,14 +3562,23 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- /* Hopeless node, leave it to direct reclaim */
- if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
- return;
-
- if (pgdat_balanced(pgdat, order, classzone_idx))
+ /* Hopeless node, leave it to direct reclaim if possible */
+ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
+ pgdat_balanced(pgdat, order, classzone_idx)) {
+ /*
+ * There may be plenty of free memory available, but it's too
+ * fragmented for high-order allocations. Wake up kcompactd
+ * and rely on compaction_suitable() to determine if it's
+ * needed. If it fails, it will defer subsequent attempts to
+ * ratelimit its work.
+ */
+ if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
+ wakeup_kcompactd(pgdat, order, classzone_idx);
return;
+ }
- trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order);
+ trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
+ gfp_flags);
wake_up_interruptible(&pgdat->kswapd_wait);
}
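With the new signature, callers pass the allocation's own gfp mask, so the cpuset check and the kcompactd fallback both see the real allocation context instead of the old hardcoded GFP_KERNEL | __GFP_HARDWALL. An illustrative call site (hypothetical; the actual call-site updates are not part of this hunk):

	/* Hypothetical allocator slow path: gfp_mask tells
	 * wakeup_kswapd() whether this allocation may fall back to
	 * direct reclaim, and therefore whether waking only kcompactd
	 * is an acceptable substitute on a hopeless node. */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->high_zoneidx, ac->nodemask)
		wakeup_kswapd(zone, gfp_mask, order, ac->high_zoneidx);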
@@ -3876,7 +3883,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
*/
int page_evictable(struct page *page)
{
- return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+ int ret;
+
+ /* Prevent address_space of inode and swap cache from being freed */
+ rcu_read_lock();
+ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+ rcu_read_unlock();
+ return ret;
}
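The rcu_read_lock() matters because page_mapping() may return an address_space backed by an inode (or swap area) that can be freed concurrently; holding the RCU read lock keeps it valid across the mapping_unevictable() test. A sketch of a typical caller (assumed, modelled on putback-style LRU handling):

	/* Sketch: the evictability test decides which LRU list a page
	 * goes back to after isolation. */
	if (page_evictable(page))
		lru_cache_add(page);	/* back to a normal LRU list */
	else
		add_page_to_unevictable_list(page);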
#ifdef CONFIG_SHMEM