Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 48 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 13 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 85395dc6eb13..10cd757f1006 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -219,6 +219,24 @@ static void reset_cached_positions(struct zone *zone)
}
/*
+ * Compound pages of >= pageblock_order should consistently be skipped until
+ * released. It is always pointless to compact pages of such order (if they are
+ * migratable), and the pageblocks they occupy cannot contain any free pages.
+ */
+static bool pageblock_skip_persistent(struct page *page)
+{
+ if (!PageCompound(page))
+ return false;
+
+ page = compound_head(page);
+
+ if (compound_order(page) >= pageblock_order)
+ return true;
+
+ return false;
+}
+
+/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
* meet.
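Concretely: with 4 KiB base pages and pageblock_order = 9 (the usual x86-64 value, assumed here rather than read from any config), a pageblock spans 512 pages. An order-9 THP fills its pageblock exactly, and an order-18 (1 GiB) hugetlb page covers 512 pageblocks, so none of those blocks can contain a free page while the compound page is alive. A minimal userspace model of the new predicate, for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9	/* assumed typical value, not taken from the kernel */

/* Models pageblock_skip_persistent(): "order" stands in for
 * compound_order(compound_head(page)). */
static bool skip_is_persistent(bool is_compound, unsigned int order)
{
	if (!is_compound)
		return false;		/* base pages may yet become free/migratable */
	return order >= PAGEBLOCK_ORDER;
}

int main(void)
{
	/* order-9 THP: fills a whole pageblock, so the skip bit persists */
	printf("order-9 compound: %d\n", skip_is_persistent(true, 9));
	/* small compound page: its block may still gain free pages, resettable */
	printf("order-4 compound: %d\n", skip_is_persistent(true, 4));
	return 0;
}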
@@ -242,6 +260,8 @@ static void __reset_isolation_suitable(struct zone *zone)
continue;
if (zone != page_zone(page))
continue;
+ if (pageblock_skip_persistent(page))
+ continue;
clear_pageblock_skip(page);
}
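For orientation, the whole zone walk now reads roughly as below. This is a sketch reconstructed from the hunk's context lines and the 4.15-era __reset_isolation_suitable(), not a verbatim quote:

for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
	struct page *page;

	cond_resched();

	if (!pfn_valid(pfn))
		continue;

	page = pfn_to_page(pfn);
	if (zone != page_zone(page))
		continue;
	/* new: never clear the hint for blocks pinned by a huge page */
	if (pageblock_skip_persistent(page))
		continue;

	clear_pageblock_skip(page);
}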
@@ -275,7 +295,7 @@ static void update_pageblock_skip(struct compact_control *cc,
struct zone *zone = cc->zone;
unsigned long pfn;
- if (cc->ignore_skip_hint)
+ if (cc->no_set_skip_hint)
return;
if (!page)
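The rename is not cosmetic: ignore_skip_hint makes a scanner disregard existing skip bits when deciding what to scan, while the new no_set_skip_hint stops it from writing skip bits at all. After this series the two flags sit side by side in struct compact_control in mm/internal.h, roughly like this (comments paraphrased):

struct compact_control {
	/* ... */
	bool ignore_skip_hint;		/* scan blocks even if marked skip */
	bool no_set_skip_hint;		/* don't mark blocks to be skipped */
	/* ... */
};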
@@ -307,7 +327,12 @@ static inline bool isolation_suitable(struct compact_control *cc,
return true;
}
-static void update_pageblock_skip(struct compact_control *cc,
+static inline bool pageblock_skip_persistent(struct page *page)
+{
+ return false;
+}
+
+static inline void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
bool migrate_scanner)
{
@@ -449,13 +474,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* and the only danger is skipping too much.
*/
if (PageCompound(page)) {
- unsigned int comp_order = compound_order(page);
+ const unsigned int order = compound_order(page);
- if (likely(comp_order < MAX_ORDER)) {
- blockpfn += (1UL << comp_order) - 1;
- cursor += (1UL << comp_order) - 1;
+ if (likely(order < MAX_ORDER)) {
+ blockpfn += (1UL << order) - 1;
+ cursor += (1UL << order) - 1;
}
-
goto isolate_fail;
}
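Note the off-by-one shape of the arithmetic: the scanner jumps to the compound page's last pfn, and the enclosing for-loop's own increment then steps onto the first pfn past it. The migrate-scanner hunk below uses the same idiom for low_pfn. A worked example with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned long blockpfn = 4096;		/* hypothetical scan position */
	const unsigned int order = 9;		/* compound page spans 512 pfns */

	blockpfn += (1UL << order) - 1;		/* land on the last tail pfn */
	blockpfn++;				/* the loop increment does this */

	/* prints 4608, i.e. 4096 + 512: the first pfn past the compound page */
	printf("next pfn scanned: %lu\n", blockpfn);
	return 0;
}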
@@ -772,11 +796,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* danger is skipping too much.
*/
if (PageCompound(page)) {
- unsigned int comp_order = compound_order(page);
-
- if (likely(comp_order < MAX_ORDER))
- low_pfn += (1UL << comp_order) - 1;
+ const unsigned int order = compound_order(page);
+ if (likely(order < MAX_ORDER))
+ low_pfn += (1UL << order) - 1;
goto isolate_fail;
}
@@ -1928,9 +1951,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
.total_free_scanned = 0,
.classzone_idx = pgdat->kcompactd_classzone_idx,
.mode = MIGRATE_SYNC_LIGHT,
- .ignore_skip_hint = true,
+ .ignore_skip_hint = false,
.gfp_mask = GFP_KERNEL,
-
};
trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
cc.classzone_idx);
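Flipping kcompactd from ignoring skip hints to honoring them fits the persistent-skip change above: with huge-page blocks persistently marked, the hints presumably become trustworthy enough for the background compactor to stop rescanning them. Strictly speaking the explicit .ignore_skip_hint = false is redundant, since C designated initializers zero every member that is not named; the line survives as self-documentation. A standalone check of that language rule, using a stand-in struct rather than the kernel's:

#include <assert.h>
#include <stdbool.h>

struct cc_model {			/* stand-in, not struct compact_control */
	int order;
	bool ignore_skip_hint;
	bool no_set_skip_hint;
};

int main(void)
{
	/* C11 6.7.9p21: members without an initializer are zeroed */
	struct cc_model cc = { .order = 3 };

	assert(!cc.ignore_skip_hint);
	assert(!cc.no_set_skip_hint);
	return 0;
}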