Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9649f4b261e..f12ad1836abe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
+	int to_free = count;
 
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count) {
+	while (to_free) {
 		struct page *page;
 		struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, page_private(page));
 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
-		} while (--count && --batch_free && !list_empty(list));
+		} while (--to_free && --batch_free && !list_empty(list));
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
 	spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
 	if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
 	struct task_struct *p = current;
+	bool drained = false;
 
 	cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
-	if (order != 0)
-		drain_all_pages();
+	if (unlikely(!(*did_some_progress)))
+		return NULL;
 
-	if (likely(*did_some_progress))
-		page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
 					alloc_flags, preferred_zone,
 					migratetype);
+
+	/*
+	 * If an allocation failed after direct reclaim, it could be because
+	 * pages are pinned on the per-cpu lists. Drain them and try again
+	 */
+	if (!page && !drained) {
+		drain_all_pages();
+		drained = true;
+		goto retry;
+	}
+
 	return page;
 }
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_page_state(zone, NR_FREE_PAGES)),
+			K(zone_nr_free_pages(zone)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
@@ -5169,9 +5182,9 @@ void *__init alloc_large_system_hash(const char *tablename,
 	if (!table)
 		panic("Failed to allocate %s hash table\n", tablename);
 
-	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
+	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
 	       tablename,
-	       (1U << log2qty),
+	       (1UL << log2qty),
 	       ilog2(size) - PAGE_SHIFT,
 	       size);
 
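Note on zone_nr_free_pages(): the hunks in zone_watermark_ok() and show_free_areas() above call a helper that this view does not show, because the diffstat is limited to mm/page_alloc.c; it comes from a companion change to include/linux/mmzone.h and include/linux/vmstat.h. The following is a rough sketch of the idea only, with the percpu_drift_mark field and the zone_page_state_snapshot() helper assumed from that companion patch rather than taken from this diff:

/*
 * Sketch, not part of this diff. NR_FREE_PAGES is maintained with
 * per-cpu deltas that are folded into the zone-wide counter only every
 * stat_threshold updates, so a plain zone_page_state() reading can
 * over-report free pages by up to num_online_cpus() * threshold.
 */

/* Exact reading: fold every CPU's pending delta into the zone counter. */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * Cheap reading most of the time; only when the zone looks low enough
 * for drift to matter (below percpu_drift_mark) while kswapd is awake
 * does it pay for the exact per-cpu summation, so counter drift cannot
 * make zone_watermark_ok() miss a breach of the min watermark.
 */
static inline unsigned long zone_nr_free_pages(struct zone *zone)
{
	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

	if (nr_free_pages < zone->percpu_drift_mark &&
			!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
		return zone_page_state_snapshot(zone, NR_FREE_PAGES);

	return nr_free_pages;
}

The reordered __mod_zone_page_state() calls in the first three hunks fit the same concern: NR_FREE_PAGES is credited only after the pages are actually back on the buddy lists, so the counter never runs ahead of what the free lists hold.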
