Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	142
1 file changed, 23 insertions, 119 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 4ac338af5120..7ea259d82a99 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	 */
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
-		if (cc->mode != COMPACT_SYNC)
+		if (!cc->sync)
 			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 * satisfies the allocation
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
-		if (cc->mode != COMPACT_SYNC &&
-		    last_pageblock_nr != pageblock_nr &&
+		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			continue;
 		}
 
-		if (cc->mode != COMPACT_SYNC)
+		if (!cc->sync)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
 		lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 
 #ifdef CONFIG_COMPACTION
-/*
- * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
- * converted to MIGRATE_MOVABLE type, false otherwise.
- */
-static bool rescue_unmovable_pageblock(struct page *page)
-{
-	unsigned long pfn, start_pfn, end_pfn;
-	struct page *start_page, *end_page;
-
-	pfn = page_to_pfn(page);
-	start_pfn = pfn & ~(pageblock_nr_pages - 1);
-	end_pfn = start_pfn + pageblock_nr_pages;
-
-	start_page = pfn_to_page(start_pfn);
-	end_page = pfn_to_page(end_pfn);
-
-	/* Do not deal with pageblocks that overlap zones */
-	if (page_zone(start_page) != page_zone(end_page))
-		return false;
-
-	for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
-								  page++) {
-		if (!pfn_valid_within(pfn))
-			continue;
-
-		if (PageBuddy(page)) {
-			int order = page_order(page);
-
-			pfn += (1 << order) - 1;
-			page += (1 << order) - 1;
-
-			continue;
-		} else if (page_count(page) == 0 || PageLRU(page))
-			continue;
-
-		return false;
-	}
-
-	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-	move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
-	return true;
-}
-enum smt_result {
-	GOOD_AS_MIGRATION_TARGET,
-	FAIL_UNMOVABLE_TARGET,
-	FAIL_BAD_TARGET,
-};
-
-/*
- * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
- * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
- * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
- */
-static enum smt_result suitable_migration_target(struct page *page,
-				      struct compact_control *cc)
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
 {
 	int migratetype = get_pageblock_migratetype(page);
 
 	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
 	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return FAIL_BAD_TARGET;
+		return false;
 
 	/* If the page is a large free page, then allow migration */
 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return GOOD_AS_MIGRATION_TARGET;
+		return true;
 
 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-	if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
-	    migrate_async_suitable(migratetype))
-		return GOOD_AS_MIGRATION_TARGET;
-
-	if (cc->mode == COMPACT_ASYNC_MOVABLE &&
-	    migratetype == MIGRATE_UNMOVABLE)
-		return FAIL_UNMOVABLE_TARGET;
-
-	if (cc->mode != COMPACT_ASYNC_MOVABLE &&
-	    migratetype == MIGRATE_UNMOVABLE &&
-	    rescue_unmovable_pageblock(page))
-		return GOOD_AS_MIGRATION_TARGET;
+	if (migrate_async_suitable(migratetype))
+		return true;
 
 	/* Otherwise skip the block */
-	return FAIL_BAD_TARGET;
+	return false;
 }
 
 /*
 * Based on information in the current compact_control, find blocks
@@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone,
 	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
 	/*
-	 * isolate_freepages() may be called more than once during
-	 * compact_zone_order() run and we want only the most recent
-	 * count.
-	 */
-	cc->nr_pageblocks_skipped = 0;
-
-	/*
 	 * Isolate free pages until enough are available to migrate the
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
-		enum smt_result ret;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
 			continue;
 
 		/* Check the block is suitable for migration */
-		ret = suitable_migration_target(page, cc);
-		if (ret != GOOD_AS_MIGRATION_TARGET) {
-			if (ret == FAIL_UNMOVABLE_TARGET)
-				cc->nr_pageblocks_skipped++;
+		if (!suitable_migration_target(page))
 			continue;
-		}
+
 		/*
 		 * Found a block suitable for isolating free pages from. Now
 		 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
 		 */
 		isolated = 0;
 		spin_lock_irqsave(&zone->lock, flags);
-		ret = suitable_migration_target(page, cc);
-		if (ret == GOOD_AS_MIGRATION_TARGET) {
+		if (suitable_migration_target(page)) {
 			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
 			isolated = isolate_freepages_block(pfn, end_pfn,
 							   freelist, false);
 			nr_freepages += isolated;
-		} else if (ret == FAIL_UNMOVABLE_TARGET)
-			cc->nr_pageblocks_skipped++;
+		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 		nr_migrate = cc->nr_migratepages;
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
-			(unsigned long)&cc->freepages, false,
-			(cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
-						      : MIGRATE_ASYNC);
+				(unsigned long)cc, false,
+				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -793,8 +715,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 enum compact_mode mode,
-				 unsigned long *nr_pageblocks_skipped)
+				 bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
-		.mode = mode,
+		.sync = sync,
 	};
-	unsigned long rc;
-
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
 
-	rc = compact_zone(zone, &cc);
-	*nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
-
-	return rc;
+	return compact_zone(zone, &cc);
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
-	unsigned long nr_pageblocks_skipped;
-	enum compact_mode mode;
 
 	/*
 	 * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
-retry:
-		status = compact_zone_order(zone, order, gfp_mask, mode,
-						&nr_pageblocks_skipped);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 			break;
-
-		if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
-			if (nr_pageblocks_skipped) {
-				mode = COMPACT_ASYNC_UNMOVABLE;
-				goto retry;
-			}
-		}
 	}
 
 	return rc;
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 			if (ok && cc->order > zone->compact_order_failed)
 				zone->compact_order_failed = cc->order + 1;
 			/* Currently async compaction is never deferred. */
-			else if (!ok && cc->mode == COMPACT_SYNC)
+			else if (!ok && cc->sync)
 				defer_compaction(zone, cc->order);
 		}
 
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
 	struct compact_control cc = {
 		.order = order,
-		.mode = COMPACT_ASYNC_MOVABLE,
+		.sync = false,
 	};
 
 	return __compact_pgdat(pgdat, &cc);
@@ -929,7 +833,7 @@ static int compact_node(int nid)
 {
 	struct compact_control cc = {
 		.order = -1,
-		.mode = COMPACT_SYNC,
+		.sync = true,
 	};
 
 	return __compact_pgdat(NODE_DATA(nid), &cc);
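
Taken together, the change collapses compaction's three-way enum compact_mode back to a single boolean: cc->sync selects between MIGRATE_ASYNC and MIGRATE_SYNC_LIGHT, and the MIGRATE_UNMOVABLE "rescue" path is removed entirely. The sketch below assembles the post-revert shape for reference only. Note the assumptions: struct compact_control actually lives in mm/internal.h and carries more fields than shown here, and compact_migrate_mode() is a made-up helper name for illustration, not a function in the tree.

#include <linux/types.h>
#include <linux/list.h>
#include <linux/migrate_mode.h>	/* MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT */

/* Trimmed sketch of the post-revert control structure (see mm/internal.h). */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	int order;			/* order a direct compactor needs */
	bool sync;			/* Synchronous (light) migration? */
	struct zone *zone;
};

/*
 * Hypothetical helper, named here for illustration only: after the revert,
 * every mode decision in the hunks above reduces to this one predicate.
 */
static inline enum migrate_mode compact_migrate_mode(const struct compact_control *cc)
{
	return cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}

Callers then pick the policy once, at the top: compact_pgdat() initializes .sync = false so that kswapd-driven compaction never blocks, compact_node() (the explicit whole-node compaction trigger, .order = -1) uses .sync = true, and try_to_compact_pages() simply forwards its sync argument into compact_zone_order(), as the final hunks show.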
