Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	83
1 file changed, 33 insertions, 50 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb90971182bd..a8f2c87792c3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 
 	mt = get_pageblock_migratetype(page);
 	if (unlikely(mt != MIGRATE_ISOLATE))
-		__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
 	if (alloc_order != order)
 		expand(zone, page, alloc_order, order,
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
 
 /*
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
 	int i;
 
 	for_each_online_node(i)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+		else
 			zone_reclaim_mode = 1;
-		}
 }
 
 #else	/* CONFIG_NUMA */
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,7 +2425,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
@@ -2487,15 +2498,21 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -4505,7 +4522,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 		zone_pcp_init(zone);
 
-		lruvec_init(&zone->lruvec, zone);
+		lruvec_init(&zone->lruvec);
 
 		if (!size)
 			continue;
@@ -5825,7 +5842,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype);
 	if (ret)
-		goto done;
+		return ret;
 
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret)
@@ -6098,37 +6115,3 @@ void dump_page(struct page *page)
 	dump_page_flags(page->flags);
 	mem_cgroup_print_bad_page(page);
 }
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
-	struct zone *z;
-	int i, nid;
-
-	for_each_node_state(nid, N_HIGH_MEMORY) {
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			z = NODE_DATA(nid)->node_zones + i;
-			z->present_pages = 0;
-		}
-	}
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
-				unsigned long end_pfn)
-{
-	struct zone *z;
-	unsigned long zone_start_pfn, zone_end_pfn;
-	int i;
-
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		z = NODE_DATA(nid)->node_zones + i;
-		zone_start_pfn = z->zone_start_pfn;
-		zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
-		/* if the two regions intersect */
-		if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
-			z->present_pages += min(end_pfn, zone_end_pfn) -
-					    max(start_pfn, zone_start_pfn);
-	}
-}
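Note on the is_thp_alloc() helper added above: it identifies a likely transparent-hugepage request by two properties, the order equals pageblock_order, and __GFP_MOVABLE is set while __GFP_REPEAT is clear. The single expression (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE tests both bits at once: masking with both flags and comparing against only one of them requires "MOVABLE set AND REPEAT clear" without two separate tests. A minimal userspace sketch of that idiom follows; the flag values and PAGEBLOCK_ORDER are illustrative stand-ins, not the kernel's real gfp definitions.

/* Standalone sketch of the is_thp_alloc() bitmask test; the constants
 * below are illustrative stand-ins for the kernel's gfp flags. */
#include <stdbool.h>
#include <stdio.h>

#define GFP_MOVABLE	0x08u	/* stand-in for __GFP_MOVABLE */
#define GFP_REPEAT	0x400u	/* stand-in for __GFP_REPEAT */
#define PAGEBLOCK_ORDER	9	/* 2MB pageblocks with 4KB pages */

static bool is_thp_alloc(unsigned int gfp_mask, unsigned int order)
{
	/* Mask with both flags, compare against MOVABLE alone:
	 * true only if MOVABLE is set and REPEAT is clear. */
	return order == PAGEBLOCK_ORDER &&
	       (gfp_mask & (GFP_MOVABLE | GFP_REPEAT)) == GFP_MOVABLE;
}

int main(void)
{
	printf("%d\n", is_thp_alloc(GFP_MOVABLE, PAGEBLOCK_ORDER));			/* 1 */
	printf("%d\n", is_thp_alloc(GFP_MOVABLE | GFP_REPEAT, PAGEBLOCK_ORDER));	/* 0 */
	printf("%d\n", is_thp_alloc(GFP_MOVABLE, 0));					/* 0 */
	return 0;
}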

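Note on the removed fixup_zone_present_pages(): it accumulated, per zone, the intersection of the zone's PFN span with a memory region, using the standard half-open interval identity: two ranges are disjoint exactly when one ends at or before the other begins, and otherwise the overlap length is min(ends) - max(starts). A self-contained sketch of that arithmetic follows; the pfn_range type is hypothetical and not part of the kernel.

/* Sketch of the half-open interval intersection used by the removed
 * fixup_zone_present_pages(); pfn_range is a hypothetical type. */
#include <stdio.h>

struct pfn_range {
	unsigned long start;	/* first PFN in the range */
	unsigned long end;	/* one past the last PFN */
};

static unsigned long overlap(struct pfn_range a, struct pfn_range b)
{
	/* Disjoint iff one range ends at or before the other begins. */
	if (a.start >= b.end || b.start >= a.end)
		return 0;
	/* Otherwise the intersection is [max(starts), min(ends)). */
	return (a.end < b.end ? a.end : b.end) -
	       (a.start > b.start ? a.start : b.start);
}

int main(void)
{
	struct pfn_range zone = { 0x1000, 0x2000 };
	struct pfn_range mem  = { 0x1800, 0x3000 };

	/* 0x2000 - 0x1800 = 0x800 pages of this zone are present. */
	printf("present pages: %lu\n", overlap(zone, mem));
	return 0;
}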