Diffstat (limited to 'mm/compaction.c')
 -rw-r--r--  mm/compaction.c | 813
 1 file changed, 464 insertions(+), 349 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c index dbc9f86b1934..1e8f8eca318c 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -23,6 +23,7 @@ #include <linux/freezer.h> #include <linux/page_owner.h> #include <linux/psi.h> +#include <linux/cpuset.h> #include "internal.h" #ifdef CONFIG_COMPACTION @@ -40,9 +41,22 @@ static inline void count_compact_events(enum vm_event_item item, long delta) { count_vm_events(item, delta); } + +/* + * order == -1 is expected when compacting proactively via + * 1. /proc/sys/vm/compact_memory + * 2. /sys/devices/system/node/nodex/compact + * 3. /proc/sys/vm/compaction_proactiveness + */ +static inline bool is_via_compact_memory(int order) +{ + return order == -1; +} + #else #define count_compact_event(item) do { } while (0) #define count_compact_events(item, delta) do { } while (0) +static inline bool is_via_compact_memory(int order) { return false; } #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA @@ -66,81 +80,40 @@ static inline void count_compact_events(enum vm_event_item item, long delta) #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) #endif -static unsigned long release_freepages(struct list_head *freelist) +static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) { - struct page *page, *next; - unsigned long high_pfn = 0; - - list_for_each_entry_safe(page, next, freelist, lru) { - unsigned long pfn = page_to_pfn(page); - list_del(&page->lru); - __free_page(page); - if (pfn > high_pfn) - high_pfn = pfn; - } - - return high_pfn; + post_alloc_hook(page, order, __GFP_MOVABLE); + set_page_refcounted(page); + return page; } +#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__)) -static void split_map_pages(struct list_head *list) +static unsigned long release_free_list(struct list_head *freepages) { - unsigned int i, order, nr_pages; - struct page *page, *next; - LIST_HEAD(tmp_list); - - list_for_each_entry_safe(page, next, list, lru) { - list_del(&page->lru); + int order; + unsigned long high_pfn = 0; - order = page_private(page); - nr_pages = 1 << order; + for (order = 0; order < NR_PAGE_ORDERS; order++) { + struct page *page, *next; - post_alloc_hook(page, order, __GFP_MOVABLE); - if (order) - split_page(page, order); + list_for_each_entry_safe(page, next, &freepages[order], lru) { + unsigned long pfn = page_to_pfn(page); - for (i = 0; i < nr_pages; i++) { - list_add(&page->lru, &tmp_list); - page++; + list_del(&page->lru); + /* + * Convert free pages into post allocation pages, so + * that we can free them via __free_page. + */ + mark_allocated(page, order, __GFP_MOVABLE); + __free_pages(page, order); + if (pfn > high_pfn) + high_pfn = pfn; } } - - list_splice(&tmp_list, list); + return high_pfn; } #ifdef CONFIG_COMPACTION -bool PageMovable(struct page *page) -{ - const struct movable_operations *mops; - - VM_BUG_ON_PAGE(!PageLocked(page), page); - if (!__PageMovable(page)) - return false; - - mops = page_movable_ops(page); - if (mops) - return true; - - return false; -} - -void __SetPageMovable(struct page *page, const struct movable_operations *mops) -{ - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page); - page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); -} -EXPORT_SYMBOL(__SetPageMovable); - -void __ClearPageMovable(struct page *page) -{ - VM_BUG_ON_PAGE(!PageMovable(page), page); - /* - * This page still has the type of a movable page, but it's - * actually not movable any more. 
- */ - page->mapping = (void *)PAGE_MAPPING_MOVABLE; -} -EXPORT_SYMBOL(__ClearPageMovable); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -249,11 +222,36 @@ static unsigned long skip_offline_sections(unsigned long start_pfn) return 0; } + +/* + * If the PFN falls into an offline section, return the end PFN of the + * next online section in reverse. If the PFN falls into an online section + * or if there is no next online section in reverse, return 0. + */ +static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) +{ + unsigned long start_nr = pfn_to_section_nr(start_pfn); + + if (!start_nr || online_section_nr(start_nr)) + return 0; + + while (start_nr-- > 0) { + if (online_section_nr(start_nr)) + return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION; + } + + return 0; +} #else static unsigned long skip_offline_sections(unsigned long start_pfn) { return 0; } + +static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) +{ + return 0; +} #endif /* @@ -357,6 +355,7 @@ static void __reset_isolation_suitable(struct zone *zone) bool source_set = false; bool free_set = false; + /* Only flush if a full compaction finished recently */ if (!zone->compact_blockskip_flush) return; @@ -409,9 +408,7 @@ void reset_isolation_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - /* Only flush if a full compaction finished recently */ - if (zone->compact_blockskip_flush) - __reset_isolation_suitable(zone); + __reset_isolation_suitable(zone); } } @@ -438,12 +435,13 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { struct zone *zone = cc->zone; - pfn = pageblock_end_pfn(pfn); - /* Set for isolation rather than compaction */ if (cc->no_set_skip_hint) return; + pfn = pageblock_end_pfn(pfn); + + /* Update where async and sync compaction should restart */ if (pfn > zone->compact_cached_migrate_pfn[0]) zone->compact_cached_migrate_pfn[0] = pfn; if (cc->mode != MIGRATE_ASYNC && @@ -465,7 +463,6 @@ static void update_pageblock_skip(struct compact_control *cc, set_pageblock_skip(page); - /* Update where async and sync compaction should restart */ if (pfn < zone->compact_cached_free_pfn) zone->compact_cached_free_pfn = pfn; } @@ -564,7 +561,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, bool strict) { int nr_scanned = 0, total_isolated = 0; - struct page *cursor; + struct page *page; unsigned long flags = 0; bool locked = false; unsigned long blockpfn = *start_pfn; @@ -574,12 +571,11 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (strict) stride = 1; - cursor = pfn_to_page(blockpfn); + page = pfn_to_page(blockpfn); /* Isolate free pages. 
*/ - for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) { + for (; blockpfn < end_pfn; blockpfn += stride, page += stride) { int isolated; - struct page *page = cursor; /* * Periodically drop the lock (if held) regardless of its @@ -602,11 +598,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (PageCompound(page)) { const unsigned int order = compound_order(page); - if (likely(order <= MAX_ORDER)) { + if ((order <= MAX_PAGE_ORDER) && + (blockpfn + (1UL << order) <= end_pfn)) { blockpfn += (1UL << order) - 1; - cursor += (1UL << order) - 1; + page += (1UL << order) - 1; nr_scanned += (1UL << order) - 1; } + goto isolate_fail; } @@ -633,7 +631,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, nr_scanned += isolated - 1; total_isolated += isolated; cc->nr_freepages += isolated; - list_add_tail(&page->lru, freelist); + list_add_tail(&page->lru, &freelist[order]); if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; @@ -641,14 +639,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, } /* Advance to the end of split page */ blockpfn += isolated - 1; - cursor += isolated - 1; + page += isolated - 1; continue; isolate_fail: if (strict) break; - else - continue; } @@ -656,8 +652,7 @@ isolate_fail: spin_unlock_irqrestore(&cc->zone->lock, flags); /* - * There is a tiny chance that we have read bogus compound_order(), - * so be careful to not go outside of the pageblock. + * Be careful to not go outside of the pageblock. */ if (unlikely(blockpfn > end_pfn)) blockpfn = end_pfn; @@ -690,18 +685,21 @@ isolate_fail: * * Non-free pages, invalid PFNs, or zone boundaries within the * [start_pfn, end_pfn) range are considered errors, cause function to - * undo its actions and return zero. + * undo its actions and return zero. cc->freepages[] are empty. * * Otherwise, function returns one-past-the-last PFN of isolated page * (which may be greater then end_pfn if end fell in a middle of - * a free page). + * a free page). cc->freepages[] contain free pages isolated. */ unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_start_pfn, block_end_pfn; - LIST_HEAD(freelist); + int order; + + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&cc->freepages[order]); pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); @@ -715,8 +713,6 @@ isolate_freepages_range(struct compact_control *cc, /* Protect pfn from changing by isolate_freepages_block */ unsigned long isolate_start_pfn = pfn; - block_end_pfn = min(block_end_pfn, end_pfn); - /* * pfn could pass the block_end_pfn if isolated freepage * is more than pageblock order. 
In this case, we adjust @@ -725,15 +721,16 @@ isolate_freepages_range(struct compact_control *cc, if (pfn >= block_end_pfn) { block_start_pfn = pageblock_start_pfn(pfn); block_end_pfn = pageblock_end_pfn(pfn); - block_end_pfn = min(block_end_pfn, end_pfn); } + block_end_pfn = min(block_end_pfn, end_pfn); + if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone)) break; isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, &freelist, 0, true); + block_end_pfn, cc->freepages, 0, true); /* * In strict mode, isolate_freepages_block() returns 0 if @@ -750,12 +747,9 @@ isolate_freepages_range(struct compact_control *cc, */ } - /* __isolate_free_page() does not map the pages */ - split_map_pages(&freelist); - if (pfn < end_pfn) { /* Loop terminated early, cleanup. */ - release_freepages(&freelist); + release_free_list(cc->freepages); return 0; } @@ -797,6 +791,32 @@ static bool too_many_isolated(struct compact_control *cc) } /** + * skip_isolation_on_order() - determine when to skip folio isolation based on + * folio order and compaction target order + * @order: to-be-isolated folio order + * @target_order: compaction target order + * + * This avoids unnecessary folio isolations during compaction. + */ +static bool skip_isolation_on_order(int order, int target_order) +{ + /* + * Unless we are performing global compaction (i.e., + * is_via_compact_memory), skip any folios that are larger than the + * target order: we wouldn't be here if we'd have a free folio with + * the desired target_order, so migrating this folio would likely fail + * later. + */ + if (!is_via_compact_memory(target_order) && order >= target_order) + return true; + /* + * We limit memory compaction to pageblocks and won't try + * creating free blocks of memory that are larger than that. + */ + return order >= pageblock_order; +} + +/** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock * @cc: Compaction control structure. @@ -862,6 +882,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { + bool is_dirty, is_unevictable; if (skip_on_failure && low_pfn >= next_skip_pfn) { /* @@ -912,11 +933,12 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* * Check if the pageblock has already been marked skipped. - * Only the aligned PFN is checked as the caller isolates + * Only the first PFN is checked as the caller isolates * COMPACT_CLUSTER_MAX at a time so the second call must * not falsely conclude that the block should be skipped. */ - if (!valid_page && pageblock_aligned(low_pfn)) { + if (!valid_page && (pageblock_aligned(low_pfn) || + low_pfn == cc->zone->zone_start_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; folio = NULL; @@ -925,34 +947,49 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; } - if (PageHuge(page) && cc->alloc_contig) { + if (PageHuge(page)) { + const unsigned int order = compound_order(page); + /* + * skip hugetlbfs if we are not compacting for pages + * bigger than its order. THPs and other compound pages + * are handled below. 
+ */ + if (!cc->alloc_contig) { + + if (order <= MAX_PAGE_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; + } + /* for alloc_contig case */ if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } - ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); + folio = page_folio(page); + ret = isolate_or_dissolve_huge_folio(folio, &cc->migratepages); /* - * Fail isolation in case isolate_or_dissolve_huge_page() + * Fail isolation in case isolate_or_dissolve_huge_folio() * reports an error. In case of -ENOMEM, abort right away. */ if (ret < 0) { /* Do not report -EBUSY down the chain */ if (ret == -EBUSY) ret = 0; - low_pfn += compound_nr(page) - 1; - nr_scanned += compound_nr(page) - 1; + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; goto isolate_fail; } - if (PageHuge(page)) { + if (folio_test_hugetlb(folio)) { /* * Hugepage was successfully isolated and placed * on the cc->migratepages list. */ - folio = page_folio(page); - low_pfn += folio_nr_pages(folio) - 1; + low_pfn += folio_nr_pages(folio) - folio_page_idx(folio, page) - 1; goto isolate_success_no_list; } @@ -978,7 +1015,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * a valid page order. Consider only values in the * valid order range to prevent low_pfn overflow. */ - if (freepage_order > 0 && freepage_order <= MAX_ORDER) { + if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) { low_pfn += (1UL << freepage_order) - 1; nr_scanned += (1UL << freepage_order) - 1; } @@ -986,21 +1023,24 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * Regardless of being on LRU, compound pages such as THP and - * hugetlbfs are not to be compacted unless we are attempting - * an allocation much larger than the huge page size (eg CMA). - * We can potentially save a lot of iterations if we skip them - * at once. The check is racy, but we can consider only valid - * values and the only danger is skipping too much. + * Regardless of being on LRU, compound pages such as THP + * (hugetlbfs is handled above) are not to be compacted unless + * we are attempting an allocation larger than the compound + * page size. We can potentially save a lot of iterations if we + * skip them at once. The check is racy, but we can consider + * only valid values and the only danger is skipping too much. */ if (PageCompound(page) && !cc->alloc_contig) { const unsigned int order = compound_order(page); - if (likely(order <= MAX_ORDER)) { - low_pfn += (1UL << order) - 1; - nr_scanned += (1UL << order) - 1; + /* Skip based on page order and compaction target order. */ + if (skip_isolation_on_order(order, cc->order)) { + if (order <= MAX_PAGE_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; } - goto isolate_fail; } /* @@ -1009,18 +1049,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * Skip any other type of page */ if (!PageLRU(page)) { - /* - * __PageMovable can return false positive so we need - * to verify it under page_lock. - */ - if (unlikely(__PageMovable(page)) && - !PageIsolated(page)) { + /* Isolation code will deal with any races. 
*/ + if (unlikely(page_has_movable_ops(page)) && + !PageMovableOpsIsolated(page)) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } - if (isolate_movable_page(page, mode)) { + if (isolate_movable_ops_page(page, mode)) { folio = page_folio(page); goto isolate_success; } @@ -1058,8 +1095,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (!folio_test_lru(folio)) goto isolate_fail_put; + is_unevictable = folio_test_unevictable(folio); + /* Compaction might skip unevictable pages but CMA takes them */ - if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio)) + if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable) goto isolate_fail_put; /* @@ -1071,26 +1110,42 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put; - if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) { - bool migrate_dirty; + is_dirty = folio_test_dirty(folio); + + if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) || + (mapping && is_unevictable)) { + bool migrate_dirty = true; + bool is_inaccessible; /* - * Only pages without mappings or that have a - * ->migrate_folio callback are possible to migrate - * without blocking. However, we can be racing with - * truncation so it's necessary to lock the page - * to stabilise the mapping as truncation holds - * the page lock until after the page is removed - * from the page cache. + * Only folios without mappings or that have + * a ->migrate_folio callback are possible to migrate + * without blocking. + * + * Folios from inaccessible mappings are not migratable. + * + * However, we can be racing with truncation, which can + * free the mapping that we need to check. Truncation + * holds the folio lock until after the folio is removed + * from the page so holding it ourselves is sufficient. + * + * To avoid locking the folio just to check inaccessible, + * assume every inaccessible folio is also unevictable, + * which is a cheaper test. If our assumption goes + * wrong, it's not a correctness bug, just potentially + * wasted cycles. */ if (!folio_trylock(folio)) goto isolate_fail_put; mapping = folio_mapping(folio); - migrate_dirty = !mapping || - mapping->a_ops->migrate_folio; + if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) { + migrate_dirty = !mapping || + mapping->a_ops->migrate_folio; + } + is_inaccessible = mapping && mapping_inaccessible(mapping); folio_unlock(folio); - if (!migrate_dirty) + if (!migrate_dirty || is_inaccessible) goto isolate_fail_put; } @@ -1119,15 +1174,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, skip_updated = true; if (test_and_set_skip(cc, valid_page) && !cc->finish_pageblock) { + low_pfn = end_pfn; goto isolate_abort; } } /* - * folio become large since the non-locked check, - * and it's on LRU. + * Check LRU folio order under the lock */ - if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { + if (unlikely(skip_isolation_on_order(folio_order(folio), + cc->order) && + !cc->alloc_contig)) { low_pfn += folio_nr_pages(folio) - 1; nr_scanned += folio_nr_pages(folio) - 1; folio_set_lru(folio); @@ -1324,12 +1381,14 @@ static bool suitable_migration_target(struct compact_control *cc, { /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { + int order = cc->order > 0 ? cc->order : pageblock_order; + /* * We are checking page_order without zone->lock taken. 
But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. */ - if (buddy_order_unsafe(page) >= pageblock_order) + if (buddy_order_unsafe(page) >= order) return false; } @@ -1372,8 +1431,8 @@ move_freelist_head(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); - if (!list_is_last(freelist, &freepage->lru)) { - list_cut_before(&sublist, freelist, &freepage->lru); + if (!list_is_first(&freepage->buddy_list, freelist)) { + list_cut_before(&sublist, freelist, &freepage->buddy_list); list_splice_tail(&sublist, freelist); } } @@ -1389,8 +1448,8 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); - if (!list_is_first(freelist, &freepage->lru)) { - list_cut_position(&sublist, freelist, &freepage->lru); + if (!list_is_last(&freepage->buddy_list, freelist)) { + list_cut_position(&sublist, freelist, &freepage->buddy_list); list_splice_tail(&sublist, freelist); } } @@ -1417,13 +1476,11 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn) if (!page) return; - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); + isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ - if (start_pfn == end_pfn) + if (start_pfn == end_pfn && !cc->no_set_skip_hint) set_pageblock_skip(page); - - return; } /* Search orders in round-robin fashion */ @@ -1500,7 +1557,7 @@ static void fast_isolate_freepages(struct compact_control *cc) spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; - list_for_each_entry_reverse(freepage, freelist, lru) { + list_for_each_entry_reverse(freepage, freelist, buddy_list) { unsigned long pfn; order_scanned++; @@ -1529,7 +1586,7 @@ static void fast_isolate_freepages(struct compact_control *cc) break; } - /* Use a minimum pfn if a preferred one was not found */ + /* Use a maximum candidate pfn if a preferred one was not found */ if (!page && high_pfn) { page = pfn_to_page(high_pfn); @@ -1548,7 +1605,7 @@ static void fast_isolate_freepages(struct compact_control *cc) nr_scanned += nr_isolated - 1; total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; - list_add_tail(&page->lru, &cc->freepages); + list_add_tail(&page->lru, &cc->freepages[order]); count_compact_events(COMPACTISOLATED, nr_isolated); } else { /* If isolation fails, abort the search */ @@ -1591,6 +1648,9 @@ static void fast_isolate_freepages(struct compact_control *cc) min(pageblock_end_pfn(min_pfn), zone_end_pfn(cc->zone)), cc->zone); + if (page && !suitable_migration_target(cc, page)) + page = NULL; + cc->free_pfn = min_pfn; } } @@ -1622,13 +1682,12 @@ static void isolate_freepages(struct compact_control *cc) unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ - struct list_head *freelist = &cc->freepages; unsigned int stride; /* Try a small search of the free lists for a candidate */ fast_isolate_freepages(cc); if (cc->nr_freepages) - goto splitmap; + return; /* * Initialise the free scanner. 
The starting point is where we last @@ -1668,8 +1727,15 @@ static void isolate_freepages(struct compact_control *cc) page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone); - if (!page) + if (!page) { + unsigned long next_pfn; + + next_pfn = skip_offline_sections_reverse(block_start_pfn); + if (next_pfn) + block_start_pfn = max(next_pfn, low_pfn); + continue; + } /* Check the block is suitable for migration */ if (!suitable_migration_target(cc, page)) @@ -1681,11 +1747,12 @@ static void isolate_freepages(struct compact_control *cc) /* Found a block suitable for isolating free pages from. */ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, freelist, stride, false); + block_end_pfn, cc->freepages, stride, false); /* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn) - update_pageblock_skip(cc, page, block_start_pfn); + update_pageblock_skip(cc, page, block_start_pfn - + pageblock_nr_pages); /* Are enough freepages isolated? */ if (cc->nr_freepages >= cc->nr_migratepages) { @@ -1721,33 +1788,63 @@ static void isolate_freepages(struct compact_control *cc) * and the loop terminated due to isolate_start_pfn < low_pfn */ cc->free_pfn = isolate_start_pfn; - -splitmap: - /* __isolate_free_page() does not map the pages */ - split_map_pages(freelist); } /* * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ -static struct folio *compaction_alloc(struct folio *src, unsigned long data) +static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; + int order = folio_order(src); + bool has_isolated_pages = false; + int start_order; + struct page *freepage; + unsigned long size; + +again: + for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) + if (!list_empty(&cc->freepages[start_order])) + break; - if (list_empty(&cc->freepages)) { + /* no free pages in the list */ + if (start_order == NR_PAGE_ORDERS) { + if (has_isolated_pages) + return NULL; isolate_freepages(cc); + has_isolated_pages = true; + goto again; + } - if (list_empty(&cc->freepages)) - return NULL; + freepage = list_first_entry(&cc->freepages[start_order], struct page, + lru); + size = 1 << start_order; + + list_del(&freepage->lru); + + while (start_order > order) { + start_order--; + size >>= 1; + + list_add(&freepage[size].lru, &cc->freepages[start_order]); + set_page_private(&freepage[size], start_order); } + dst = (struct folio *)freepage; - dst = list_entry(cc->freepages.next, struct folio, lru); - list_del(&dst->lru); - cc->nr_freepages--; + post_alloc_hook(&dst->page, order, __GFP_MOVABLE); + set_page_refcounted(&dst->page); + if (order) + prep_compound_page(&dst->page, order); + cc->nr_freepages -= 1 << order; + cc->nr_migratepages -= 1 << order; + return page_rmappable_folio(&dst->page); +} - return dst; +static struct folio *compaction_alloc(struct folio *src, unsigned long data) +{ + return alloc_hooks(compaction_alloc_noprof(src, data)); } /* @@ -1758,9 +1855,19 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; + int order = folio_order(dst); + struct page *page = &dst->page; - list_add(&dst->lru, &cc->freepages); - cc->nr_freepages++; + if (folio_put_testzero(dst)) { + 
free_pages_prepare(page, order); + list_add(&dst->lru, &cc->freepages[order]); + cc->nr_freepages += 1 << order; + } + cc->nr_migratepages += 1 << order; + /* + * someone else has referenced the page, we cannot take it back to our + * free list. + */ } /* possible outcome of isolate_migratepages */ @@ -1883,7 +1990,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; - list_for_each_entry(freepage, freelist, lru) { + list_for_each_entry(freepage, freelist, buddy_list) { unsigned long free_pfn; if (nr_scanned++ >= limit) { @@ -1957,9 +2064,9 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) block_start_pfn = cc->zone->zone_start_pfn; /* - * fast_find_migrateblock marks a pageblock skipped so to avoid - * the isolation_suitable check below, check whether the fast - * search was successful. + * fast_find_migrateblock() has already ensured the pageblock is not + * set with a skipped flag, so to avoid the isolation_suitable check + * below again, check whether the fast search was successful. */ fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; @@ -2002,7 +2109,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) * before making it "skip" so other compaction instances do * not scan the same block. */ - if (pageblock_aligned(low_pfn) && + if ((pageblock_aligned(low_pfn) || + low_pfn == cc->zone->zone_start_pfn) && !fast_find_block && !isolation_suitable(cc, page)) continue; @@ -2036,15 +2144,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) } /* - * order == -1 is expected when compacting via - * /proc/sys/vm/compact_memory - */ -static inline bool is_via_compact_memory(int order) -{ - return order == -1; -} - -/* * Determine whether kswapd is (or recently was!) running on this node. * * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't @@ -2112,17 +2211,13 @@ static unsigned int fragmentation_score_node(pg_data_t *pgdat) return score; } -static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +static unsigned int fragmentation_score_wmark(bool low) { - unsigned int wmark_low; + unsigned int wmark_low, leeway; - /* - * Cap the low watermark to avoid excessive compaction - * activity in case a user sets the proactiveness tunable - * close to 100 (maximum). - */ - wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); - return low ? wmark_low : min(wmark_low + 10, 100U); + wmark_low = 100U - sysctl_compaction_proactiveness; + leeway = min(10U, wmark_low / 2); + return low ? 
wmark_low : min(wmark_low + leeway, 100U); } static bool should_proactive_compact_node(pg_data_t *pgdat) @@ -2132,7 +2227,7 @@ static bool should_proactive_compact_node(pg_data_t *pgdat) if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) return false; - wmark_high = fragmentation_score_wmark(pgdat, false); + wmark_high = fragmentation_score_wmark(false); return fragmentation_score_node(pgdat) > wmark_high; } @@ -2171,7 +2266,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) return COMPACT_PARTIAL_SKIPPED; score = fragmentation_score_zone(cc->zone); - wmark_low = fragmentation_score_wmark(pgdat, true); + wmark_low = fragmentation_score_wmark(true); if (score > wmark_low) ret = COMPACT_CONTINUE; @@ -2193,11 +2288,26 @@ static enum compact_result __compact_finished(struct compact_control *cc) if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE; + /* + * When defrag_mode is enabled, make kcompactd target + * watermarks in whole pageblocks. Because they can be stolen + * without polluting, no further fallback checks are needed. + */ + if (defrag_mode && !cc->direct_compaction) { + if (__zone_watermark_ok(cc->zone, cc->order, + high_wmark_pages(cc->zone), + cc->highest_zoneidx, cc->alloc_flags, + zone_page_state(cc->zone, + NR_FREE_PAGES_BLOCKS))) + return COMPACT_SUCCESS; + + return COMPACT_CONTINUE; + } + /* Direct compactor: Is a suitable page free? */ ret = COMPACT_NO_SUITABLE_PAGE; - for (order = cc->order; order <= MAX_ORDER; order++) { + for (order = cc->order; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &cc->zone->free_area[order]; - bool can_steal; /* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) @@ -2213,8 +2323,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) * Job done if allocation would steal freepages from * other migratetype buddy lists. */ - if (find_suitable_fallback(area, order, migratetype, - true, &can_steal) != -1) + if (find_suitable_fallback(area, order, migratetype, true) >= 0) /* * Movable pages are OK in any pageblock. If we are * stealing for a non-movable allocation, make sure @@ -2246,40 +2355,42 @@ static enum compact_result compact_finished(struct compact_control *cc) } static bool __compaction_suitable(struct zone *zone, int order, - int highest_zoneidx, - unsigned long wmark_target) + unsigned long watermark, int highest_zoneidx, + unsigned long free_pages) { - unsigned long watermark; /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the - * watermark and alloc_flags have to match, or be more pessimistic than - * the check in __isolate_free_page(). We don't use the direct - * compactor's alloc_flags, as they are not relevant for freepage - * isolation. We however do use the direct compactor's highest_zoneidx - * to skip over zones where lowmem reserves would prevent allocation - * even if compaction succeeds. - * For costly orders, we require low watermark instead of min for - * compaction to proceed to increase its chances. + * watermark have to match, or be more pessimistic than the check in + * __isolate_free_page(). + * + * For costly orders, we require a higher watermark for compaction to + * proceed to increase its chances. + * + * We use the direct compactor's highest_zoneidx to skip over zones + * where lowmem reserves would prevent allocation even if compaction + * succeeds. 
+ * * ALLOC_CMA is used, as pages in CMA pageblocks are considered - * suitable migration targets + * suitable migration targets. */ - watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? - low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); + if (order > PAGE_ALLOC_COSTLY_ORDER) + watermark += low_wmark_pages(zone) - min_wmark_pages(zone); return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - ALLOC_CMA, wmark_target); + ALLOC_CMA, free_pages); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? */ -bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) +bool compaction_suitable(struct zone *zone, int order, unsigned long watermark, + int highest_zoneidx) { enum compact_result compact_result; bool suitable; - suitable = __compaction_suitable(zone, order, highest_zoneidx, + suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to @@ -2317,6 +2428,7 @@ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) return suitable; } +/* Used by direct reclaimers */ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags) { @@ -2339,14 +2451,62 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); - if (__compaction_suitable(zone, order, ac->highest_zoneidx, - available)) + if (__compaction_suitable(zone, order, min_wmark_pages(zone), + ac->highest_zoneidx, available)) return true; } return false; } +/* + * Should we do compaction for target allocation order. + * Return COMPACT_SUCCESS if allocation for target order can be already + * satisfied + * Return COMPACT_SKIPPED if compaction for target order is likely to fail + * Return COMPACT_CONTINUE if compaction for target order should be ran + */ +static enum compact_result +compaction_suit_allocation_order(struct zone *zone, unsigned int order, + int highest_zoneidx, unsigned int alloc_flags, + bool async, bool kcompactd) +{ + unsigned long free_pages; + unsigned long watermark; + + if (kcompactd && defrag_mode) + free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); + else + free_pages = zone_page_state(zone, NR_FREE_PAGES); + + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx, + alloc_flags, free_pages)) + return COMPACT_SUCCESS; + + /* + * For unmovable allocations (without ALLOC_CMA), check if there is enough + * free memory in the non-CMA pageblocks. Otherwise compaction could form + * the high-order page in CMA pageblocks, which would not help the + * allocation to succeed. However, limit the check to costly order async + * compaction (such as opportunistic THP attempts) because there is the + * possibility that compaction would migrate pages from non-CMA to CMA + * pageblock. 
+ */ + if (order > PAGE_ALLOC_COSTLY_ORDER && async && + !(alloc_flags & ALLOC_CMA)) { + if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order), + highest_zoneidx, 0, + zone_page_state(zone, NR_FREE_PAGES))) + return COMPACT_SKIPPED; + } + + if (!compaction_suitable(zone, order, watermark, highest_zoneidx)) + return COMPACT_SKIPPED; + + return COMPACT_CONTINUE; +} + static enum compact_result compact_zone(struct compact_control *cc, struct capture_control *capc) { @@ -2356,7 +2516,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) unsigned long last_migrated_pfn; const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; - unsigned int nr_succeeded = 0; + unsigned int nr_succeeded = 0, nr_migratepages; + int order; /* * These counters track activities during zone compaction. Initialize @@ -2366,25 +2527,20 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) cc->total_free_scanned = 0; cc->nr_migratepages = 0; cc->nr_freepages = 0; - INIT_LIST_HEAD(&cc->freepages); + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&cc->freepages[order]); INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); if (!is_via_compact_memory(cc->order)) { - unsigned long watermark; - - /* Allocation can already succeed, nothing to do */ - watermark = wmark_pages(cc->zone, - cc->alloc_flags & ALLOC_WMARK_MASK); - if (zone_watermark_ok(cc->zone, cc->order, watermark, - cc->highest_zoneidx, cc->alloc_flags)) - return COMPACT_SUCCESS; - - /* Compaction is likely to fail */ - if (!compaction_suitable(cc->zone, cc->order, - cc->highest_zoneidx)) - return COMPACT_SKIPPED; + ret = compaction_suit_allocation_order(cc->zone, cc->order, + cc->highest_zoneidx, + cc->alloc_flags, + cc->mode == MIGRATE_ASYNC, + !cc->direct_compaction); + if (ret != COMPACT_CONTINUE) + return ret; } /* @@ -2478,14 +2634,21 @@ rescan: goto check_drain; case ISOLATE_SUCCESS: update_cached = false; - last_migrated_pfn = iteration_start_pfn; + last_migrated_pfn = max(cc->zone->zone_start_pfn, + pageblock_start_pfn(cc->migrate_pfn - 1)); } + /* + * Record the number of pages to migrate since the + * compaction_alloc/free() will update cc->nr_migratepages + * properly. + */ + nr_migratepages = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded); - trace_mm_compaction_migratepages(cc, nr_succeeded); + trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded); /* All pages were either migrated or will be released */ cc->nr_migratepages = 0; @@ -2501,7 +2664,7 @@ rescan: } /* * If an ASYNC or SYNC_LIGHT fails to migrate a page - * within the current order-aligned block and + * within the pageblock_order-aligned block and * fast_find_migrateblock may be used then scan the * remainder of the pageblock. This will mark the * pageblock "skip" to avoid rescanning in the near @@ -2559,7 +2722,7 @@ out: * so we don't leave any returned pages behind in the next attempt. 
*/ if (cc->nr_freepages > 0) { - unsigned long free_pfn = release_freepages(&cc->freepages); + unsigned long free_pfn = release_free_list(cc->freepages); cc->nr_freepages = 0; VM_BUG_ON(free_pfn == 0); @@ -2578,7 +2741,6 @@ out: trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); - VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); return ret; @@ -2653,16 +2815,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **capture) { - int may_perform_io = (__force int)(gfp_mask & __GFP_IO); struct zoneref *z; struct zone *zone; enum compact_result rc = COMPACT_SKIPPED; - /* - * Check if the GFP flags allow compaction - GFP_NOIO is really - * tricky context because the migration might require IO - */ - if (!may_perform_io) + if (!gfp_compaction_allowed(gfp_mask)) return COMPACT_SKIPPED; trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); @@ -2672,6 +2829,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, ac->highest_zoneidx, ac->nodemask) { enum compact_result status; + if (cpusets_enabled() && + (alloc_flags & ALLOC_CPUSET) && + !__cpuset_zone_allowed(zone, gfp_mask)) + continue; + if (prio > MIN_COMPACT_PRIORITY && compaction_deferred(zone, order)) { rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); @@ -2718,25 +2880,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, } /* - * Compact all zones within a node till each zone's fragmentation score - * reaches within proactive compaction thresholds (as determined by the - * proactiveness tunable). + * compact_node() - compact all zones within a node + * @pgdat: The node page data + * @proactive: Whether the compaction is proactive * - * It is possible that the function returns before reaching score targets - * due to various back-off conditions, such as, contention on per-node or - * per-zone locks. + * For proactive compaction, compact till each zone's fragmentation score + * reaches within proactive compaction thresholds (as determined by the + * proactiveness tunable), it is possible that the function returns before + * reaching score targets due to various back-off conditions, such as, + * contention on per-node or per-zone locks. */ -static void proactive_compact_node(pg_data_t *pgdat) +static int compact_node(pg_data_t *pgdat, bool proactive) { int zoneid; struct zone *zone; struct compact_control cc = { .order = -1, - .mode = MIGRATE_SYNC_LIGHT, + .mode = proactive ? 
MIGRATE_SYNC_LIGHT : MIGRATE_SYNC, .ignore_skip_hint = true, .whole_zone = true, .gfp_mask = GFP_KERNEL, - .proactive_compaction = true, + .proactive_compaction = proactive, }; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { @@ -2744,57 +2908,42 @@ static void proactive_compact_node(pg_data_t *pgdat) if (!populated_zone(zone)) continue; + if (fatal_signal_pending(current)) + return -EINTR; + cc.zone = zone; compact_zone(&cc, NULL); - count_compact_events(KCOMPACTD_MIGRATE_SCANNED, - cc.total_migrate_scanned); - count_compact_events(KCOMPACTD_FREE_SCANNED, - cc.total_free_scanned); + if (proactive) { + count_compact_events(KCOMPACTD_MIGRATE_SCANNED, + cc.total_migrate_scanned); + count_compact_events(KCOMPACTD_FREE_SCANNED, + cc.total_free_scanned); + } } -} - -/* Compact all zones within a node */ -static void compact_node(int nid) -{ - pg_data_t *pgdat = NODE_DATA(nid); - int zoneid; - struct zone *zone; - struct compact_control cc = { - .order = -1, - .mode = MIGRATE_SYNC, - .ignore_skip_hint = true, - .whole_zone = true, - .gfp_mask = GFP_KERNEL, - }; - - for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { - - zone = &pgdat->node_zones[zoneid]; - if (!populated_zone(zone)) - continue; - - cc.zone = zone; - - compact_zone(&cc, NULL); - } + return 0; } -/* Compact all nodes in the system */ -static void compact_nodes(void) +/* Compact all zones of all nodes in the system */ +static int compact_nodes(void) { - int nid; + int ret, nid; /* Flush pending updates to the LRU lists */ lru_add_drain_all(); - for_each_online_node(nid) - compact_node(nid); + for_each_online_node(nid) { + ret = compact_node(NODE_DATA(nid), false); + if (ret) + return ret; + } + + return 0; } -static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, +static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc, nid; @@ -2824,7 +2973,7 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory */ -static int sysctl_compaction_handler(struct ctl_table *table, int write, +static int sysctl_compaction_handler(const struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int ret; @@ -2837,9 +2986,9 @@ static int sysctl_compaction_handler(struct ctl_table *table, int write, return -EINVAL; if (write) - compact_nodes(); + ret = compact_nodes(); - return 0; + return ret; } #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) @@ -2853,7 +3002,7 @@ static ssize_t compact_store(struct device *dev, /* Flush pending updates to the LRU lists */ lru_add_drain_all(); - compact_node(nid); + compact_node(NODE_DATA(nid), false); } return count; @@ -2867,7 +3016,7 @@ int compaction_register_node(struct node *node) void compaction_unregister_node(struct node *node) { - return device_remove_file(&node->dev, &dev_attr_compact); + device_remove_file(&node->dev, &dev_attr_compact); } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ @@ -2882,6 +3031,9 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) int zoneid; struct zone *zone; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; + enum compact_result ret; + unsigned int alloc_flags = defrag_mode ? 
+ ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN; for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { zone = &pgdat->node_zones[zoneid]; @@ -2889,14 +3041,11 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - /* Allocation can already succeed, check other zones */ - if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, - min_wmark_pages(zone), - highest_zoneidx, 0)) - continue; - - if (compaction_suitable(zone, pgdat->kcompactd_max_order, - highest_zoneidx)) + ret = compaction_suit_allocation_order(zone, + pgdat->kcompactd_max_order, + highest_zoneidx, alloc_flags, + false, true); + if (ret == COMPACT_CONTINUE) return true; } @@ -2918,7 +3067,10 @@ static void kcompactd_do_work(pg_data_t *pgdat) .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, + .alloc_flags = defrag_mode ? ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN, }; + enum compact_result ret; + trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, cc.highest_zoneidx); count_compact_event(KCOMPACTD_WAKE); @@ -2933,12 +3085,10 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_deferred(zone, cc.order)) continue; - /* Allocation can already succeed, nothing to do */ - if (zone_watermark_ok(zone, cc.order, - min_wmark_pages(zone), zoneid, 0)) - continue; - - if (!compaction_suitable(zone, cc.order, zoneid)) + ret = compaction_suit_allocation_order(zone, + cc.order, zoneid, cc.alloc_flags, + false, true); + if (ret != COMPACT_CONTINUE) continue; if (kthread_should_stop()) @@ -3015,15 +3165,10 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t *)p; - struct task_struct *tsk = current; long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); long timeout = default_timeout; - const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); - - if (!cpumask_empty(cpumask)) - set_cpus_allowed_ptr(tsk, cpumask); - + current->flags |= PF_KCOMPACTD; set_freezable(); pgdat->kcompactd_max_order = 0; @@ -3066,7 +3211,7 @@ static int kcompactd(void *p) unsigned int prev_score, score; prev_score = fragmentation_score_node(pgdat); - proactive_compact_node(pgdat); + compact_node(pgdat, true); score = fragmentation_score_node(pgdat); /* * Defer proactive compaction if the fragmentation @@ -3080,6 +3225,8 @@ static int kcompactd(void *p) pgdat->proactive_compact_trigger = false; } + current->flags &= ~PF_KCOMPACTD; + return 0; } @@ -3094,10 +3241,12 @@ void __meminit kcompactd_run(int nid) if (pgdat->kcompactd) return; - pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); + pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid); if (IS_ERR(pgdat->kcompactd)) { pr_err("Failed to start kcompactd on node %d\n", nid); pgdat->kcompactd = NULL; + } else { + wake_up_process(pgdat->kcompactd); } } @@ -3115,31 +3264,7 @@ void __meminit kcompactd_stop(int nid) } } -/* - * It's optimal to keep kcompactd on the same CPUs as their memory, but - * not required for correctness. So if the last cpu in a node goes - * away, we get changed to run anywhere: as the first one comes back, - * restore their cpu bindings. 
- */ -static int kcompactd_cpu_online(unsigned int cpu) -{ - int nid; - - for_each_node_state(nid, N_MEMORY) { - pg_data_t *pgdat = NODE_DATA(nid); - const struct cpumask *mask; - - mask = cpumask_of_node(pgdat->node_id); - - if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) - /* One of our CPUs online: restore mask */ - if (pgdat->kcompactd) - set_cpus_allowed_ptr(pgdat->kcompactd, mask); - } - return 0; -} - -static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, +static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret, old; @@ -3158,7 +3283,7 @@ static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, return ret; } -static struct ctl_table vm_compaction[] = { +static const struct ctl_table vm_compaction[] = { { .procname = "compact_memory", .data = &sysctl_compact_memory, @@ -3193,21 +3318,11 @@ static struct ctl_table vm_compaction[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, - { } }; static int __init kcompactd_init(void) { int nid; - int ret; - - ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "mm/compaction:online", - kcompactd_cpu_online, NULL); - if (ret < 0) { - pr_err("kcompactd: failed to register hotplug callbacks.\n"); - return ret; - } for_each_node_state(nid, N_MEMORY) kcompactd_run(nid); |
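The central mechanical change in this patch is that isolated free pages are now kept on per-order lists (cc->freepages[NR_PAGE_ORDERS]) and compaction_alloc_noprof() satisfies a request by searching upward from the source folio's order and splitting a larger entry buddy-style, re-queueing the upper halves on the lower-order lists. The sketch below is a minimal userspace model of that split loop only; struct block, order_list[] and take() are invented names, and the malloc'd "half" nodes merely stand in for the &freepage[size] sub-blocks the kernel puts back on a lower list.

/*
 * Userspace model of the per-order free list split performed by the
 * patched compaction_alloc_noprof(). Invented names throughout; not
 * kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_ORDERS 11	/* analogous to NR_PAGE_ORDERS */

struct block {
	struct block *next;
	int order;
};

static struct block *order_list[NR_ORDERS];	/* analogous to cc->freepages[] */

static void push(struct block *b, int order)
{
	b->order = order;
	b->next = order_list[order];
	order_list[order] = b;
}

/* Take a block of exactly 'order', splitting a larger one if needed. */
static struct block *take(int order)
{
	struct block *b;
	int start;

	for (start = order; start < NR_ORDERS; start++)
		if (order_list[start])
			break;
	if (start == NR_ORDERS)
		return NULL;	/* caller would go isolate more free pages */

	b = order_list[start];
	order_list[start] = b->next;

	while (start > order) {
		struct block *half = malloc(sizeof(*half));

		start--;
		push(half, start);	/* upper half goes to the lower-order list */
	}
	b->order = order;
	return b;
}

int main(void)
{
	struct block *big = malloc(sizeof(*big));
	struct block *b;

	push(big, 4);		/* one order-4 block isolated */
	b = take(1);		/* order-1 request: split 4 -> 3 -> 2 -> 1 */
	printf("got order-%d block; remainders queued at orders 3, 2, 1\n",
	       b->order);
	return 0;
}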

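The reworked fragmentation_score_wmark() drops the old 5% floor on the low threshold and replaces the fixed +10 gap with a leeway capped at half of wmark_low. Below is a standalone arithmetic check of those two formulas as they appear in the diff; it is not kernel code, and min()/max() are spelled out by hand.

#include <stdio.h>

static unsigned int wmark(unsigned int proactiveness, int low)
{
	unsigned int wmark_low = 100U - proactiveness;
	unsigned int leeway = wmark_low / 2 < 10U ? wmark_low / 2 : 10U;
	unsigned int wmark_high = wmark_low + leeway;

	if (wmark_high > 100U)
		wmark_high = 100U;
	return low ? wmark_low : wmark_high;
}

int main(void)
{
	unsigned int p[] = { 20, 80, 95, 100 };
	int i;

	for (i = 0; i < 4; i++)
		printf("proactiveness=%3u low=%3u high=%3u\n",
		       p[i], wmark(p[i], 1), wmark(p[i], 0));
	return 0;
}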