Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 66
1 file changed, 17 insertions(+), 49 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7719ec10dc5..b55699cd9067 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -105,7 +105,6 @@ struct scan_control {
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
-	struct memcg_scanrecord *memcg_record;
 
 	/*
 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
@@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
-			if (!scanning_global_lru(sc))
-				sc->memcg_record->nr_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 
 	reclaim_stat->recent_scanned[0] += *nr_anon;
 	reclaim_stat->recent_scanned[1] += *nr_file;
-	if (!scanning_global_lru(sc)) {
-		sc->memcg_record->nr_scanned[0] += *nr_anon;
-		sc->memcg_record->nr_scanned[1] += *nr_file;
-	}
 }
 
 /*
@@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
 	}
 
-	if (!scanning_global_lru(sc))
-		sc->memcg_record->nr_freed[file] += nr_reclaimed;
-
 	local_irq_disable();
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
@@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	}
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
-	if (!scanning_global_lru(sc))
-		sc->memcg_record->nr_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	if (file)
@@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * get_scan_ratio.
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
-	if (!scanning_global_lru(sc))
-		sc->memcg_record->nr_rotated[file] += nr_rotated;
 
 	move_active_pages_to_lru(zone, &l_active,
 						LRU_ACTIVE + file * LRU_FILE);
@@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	u64 fraction[2], denominator;
 	enum lru_list l;
 	int noswap = 0;
-	int force_scan = 0;
+	bool force_scan = false;
 	unsigned long nr_force_scan[2];
-
-	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
-	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-		/* kswapd does zone balancing and need to scan this zone */
-		if (scanning_global_lru(sc) && current_is_kswapd())
-			force_scan = 1;
-		/* memcg may have small limit and need to avoid priority drop */
-		if (!scanning_global_lru(sc))
-			force_scan = 1;
-	}
+	/* kswapd does zone balancing and needs to scan this zone */
+	if (scanning_global_lru(sc) && current_is_kswapd())
+		force_scan = true;
+	/* memcg may have small limit and need to avoid priority drop */
+	if (!scanning_global_lru(sc))
+		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		goto out;
 	}
 
+	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
 	if (scanning_global_lru(sc)) {
 		free  = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
@@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
-					gfp_t gfp_mask, bool noswap,
-					struct zone *zone,
-					struct memcg_scanrecord *rec,
-					unsigned long *scanned)
+						gfp_t gfp_mask, bool noswap,
+						struct zone *zone,
+						unsigned long *nr_scanned)
 {
 	struct scan_control sc = {
 		.nr_scanned = 0,
@@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.may_swap = !noswap,
 		.order = 0,
 		.mem_cgroup = mem,
-		.memcg_record = rec,
 	};
-	ktime_t start, end;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						      sc.may_writepage,
 						      sc.gfp_mask);
 
-	start = ktime_get();
 	/*
 	 * NOTE: Although we can get the priority field, using it
 	 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	 * the priority and make it zero.
 	 */
 	shrink_zone(0, zone, &sc);
-	end = ktime_get();
-
-	if (rec)
-		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
-	*scanned = sc.nr_scanned;
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
+	*nr_scanned = sc.nr_scanned;
 
 	return sc.nr_reclaimed;
 }
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 					   gfp_t gfp_mask,
-					   bool noswap,
-					   struct memcg_scanrecord *rec)
+					   bool noswap)
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
-	ktime_t start, end;
 	int nid;
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.order = 0,
 		.mem_cgroup = mem_cont,
-		.memcg_record = rec,
 		.nodemask = NULL, /* we don't care the placement */
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	start = ktime_get();
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 	 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 					    sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-	end = ktime_get();
-	if (rec)
-		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
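
The most visible behavioral change in the diff is in get_scan_count(): before the patch, scanning was only forced when the zone's anon plus file pages, shifted right by the reclaim priority, fell below SWAP_CLUSTER_MAX; after the patch, kswapd (for global reclaim) and memcg reclaim force the scan unconditionally. The sketch below is not kernel code, only a minimal standalone model of that decision under stated assumptions: global_lru, is_kswapd, anon, file and priority stand in for scanning_global_lru(sc), current_is_kswapd(), the zone_nr_lru_pages() sums and the reclaim priority, and SWAP_CLUSTER_MAX is taken as 32 as in the kernel headers.

/*
 * Illustrative sketch only: models the force_scan decision in
 * get_scan_count() before and after the patch shown above.
 */
#include <stdbool.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* value used by the kernel */

/* Old behaviour: force scanning only when the zone looks too small. */
static bool force_scan_old(bool global_lru, bool is_kswapd,
			   unsigned long anon, unsigned long file, int priority)
{
	bool force_scan = false;

	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
		if (global_lru && is_kswapd)	/* kswapd balances this zone */
			force_scan = true;
		if (!global_lru)		/* memcg with a small limit */
			force_scan = true;
	}
	return force_scan;
}

/* New behaviour: the size check is gone; kswapd and memcg always force. */
static bool force_scan_new(bool global_lru, bool is_kswapd)
{
	if (global_lru && is_kswapd)
		return true;
	if (!global_lru)
		return true;
	return false;
}

int main(void)
{
	/* A large zone scanned by kswapd at priority 12. */
	printf("old: %d, new: %d\n",
	       force_scan_old(true, true, 1UL << 20, 1UL << 20, 12),
	       force_scan_new(true, true));
	return 0;
}

Compiled and run, this prints "old: 0, new: 1" for the large-zone kswapd case, which is exactly the case whose handling the patch changes; the memcg_scanrecord removals elsewhere in the diff only drop statistics bookkeeping and do not alter reclaim decisions.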
