| author | Ingo Molnar <mingo@elte.hu> | 2009-08-31 17:54:18 +0200 | 
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 18:05:25 +0200 | 
| commit | bbe69aa57a7374b51242b95a54eefcf0d0393b7e (patch) | |
| tree | c45e48d11cc9cb81a57c8c27f7243863b117cec8 /mm/page_alloc.c | |
| parent | a417887637e862b434b293404f2a31ad1f282a58 (diff) | |
| parent | 326ba5010a5429a5a528b268b36a5900d4ab0eba (diff) | |
Merge commit 'v2.6.31-rc8' into core/locking
Merge reason: we were on -rc4, move to -rc8 before applying
              a new batch of locking infrastructure changes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 25 | 
1 file changed, 19 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index caa92689aac9..5cc986eb9f6f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -882,7 +882,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype)
+			int migratetype, int cold)
 {
 	int i;
 
@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		list_add(&page->lru, list);
+		if (likely(cold == 0))
+			list_add(&page->lru, list);
+		else
+			list_add_tail(&page->lru, list);
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
@@ -1119,7 +1122,8 @@ again:
 		local_irq_save(flags);
 		if (!pcp->count) {
 			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			if (unlikely(!pcp->count))
 				goto failed;
 		}
@@ -1138,7 +1142,8 @@ again:
 		/* Allocate more to the pcp list if necessary */
 		if (unlikely(&page->lru == &pcp->list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list, migratetype);
+					pcp->batch, &pcp->list,
+					migratetype, cold);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
 
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * be using allocators in order of preference for an area that is
 	 * too large.
 	 */
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
+	if (order >= MAX_ORDER) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
 		return NULL;
+	}
 
 	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1789,6 +1796,10 @@ rebalance:
 	if (p->flags & PF_MEMALLOC)
 		goto nopage;
 
+	/* Avoid allocations with no watermarks from looping endlessly */
+	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -2533,7 +2544,6 @@ static void build_zonelists(pg_data_t *pgdat)
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
-	memset(node_load, 0, sizeof(node_load));
 	memset(node_order, 0, sizeof(node_order));
 	j = 0;
 
@@ -2642,6 +2652,9 @@ static int __build_all_zonelists(void *dummy)
 {
 	int nid;
 
+#ifdef CONFIG_NUMA
+	memset(node_load, 0, sizeof(node_load));
+#endif
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
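As context for the rmqueue_bulk() hunks above: with the new `cold` argument, hot pages are linked at the front of the per-CPU free list and cold pages at the back, so cache-hot pages are handed out first while cold requests drain from the other end. Below is a minimal userspace sketch of that head-versus-tail placement only, assuming a toy doubly linked list in place of the kernel's `struct list_head`; the helper names (`add_head`, `add_tail`, `fill_bulk`) are invented for illustration, and the sketch deliberately ignores the `list = &page->lru` ordering step the real code uses.

```c
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the kernel's circular doubly linked list_head. */
struct node {
	int page_id;
	struct node *prev, *next;
};

static struct node head = { .page_id = -1, .prev = &head, .next = &head };

/* Like list_add(): insert right after the head (hot pages, reused first). */
static void add_head(struct node *n)
{
	n->next = head.next;
	n->prev = &head;
	head.next->prev = n;
	head.next = n;
}

/* Like list_add_tail(): insert right before the head (cold pages, reused last). */
static void add_tail(struct node *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

/* Hypothetical bulk fill mirroring the cold test added to rmqueue_bulk(). */
static void fill_bulk(int base, int count, int cold)
{
	for (int i = 0; i < count; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			exit(1);
		n->page_id = base + i;
		if (cold == 0)
			add_head(n);	/* hot: front of the list */
		else
			add_tail(n);	/* cold: back of the list */
	}
}

int main(void)
{
	fill_bulk(0, 4, 0);	/* hot refill */
	fill_bulk(100, 2, 1);	/* cold refill */

	/* Walking from the head shows hot pages first, cold pages last:
	 * 3 2 1 0 100 101 */
	for (struct node *n = head.next; n != &head; n = n->next)
		printf("page %d\n", n->page_id);
	return 0;
}
```

Allocations that pop from the front therefore keep reusing recently freed, cache-warm pages, while callers asking for cold pages (e.g. for I/O) take them from the tail without disturbing the hot end.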
