Diffstat (limited to 'include/linux/compaction.h')
-rw-r--r--  include/linux/compaction.h  174
1 file changed, 102 insertions(+), 72 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 091d72e70d8a..173d9c07a895 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,109 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
-/* Return values for compact_zone() and try_to_compact_pages() */
-/* compaction didn't start as it was not possible or direct reclaim was more suitable */
-#define COMPACT_SKIPPED 0
-/* compaction should continue to another pageblock */
-#define COMPACT_CONTINUE 1
-/* direct compaction partially compacted a zone and there are suitable pages */
-#define COMPACT_PARTIAL 2
-/* The full zone was compacted */
-#define COMPACT_COMPLETE 3
-
-#ifdef CONFIG_COMPACTION
-extern int sysctl_compact_memory;
-extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_extfrag_threshold;
-extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-
-extern int fragmentation_index(struct zone *zone, unsigned int order);
-extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *mask,
- bool sync, bool *contended);
-extern void compact_pgdat(pg_data_t *pgdat, int order);
-extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern unsigned long compaction_suitable(struct zone *zone, int order);
+/*
+ * Determines how hard direct compaction should try to succeed.
+ * A lower value means higher priority, analogous to reclaim priority.
+ */
+enum compact_priority {
+ COMPACT_PRIO_SYNC_FULL,
+ MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
+ COMPACT_PRIO_SYNC_LIGHT,
+ MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ COMPACT_PRIO_ASYNC,
+ INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
+};
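
/*
 * Editor's sketch (not part of the diff): how a caller might walk these
 * priorities across allocation retries. The retry policy shown is an
 * illustrative assumption; only the enum names above come from this header.
 */
static inline enum compact_priority
next_compact_priority(enum compact_priority prio)
{
	/* A lower numeric value means trying harder; clamp at the hardest. */
	if (prio > MIN_COMPACT_PRIORITY)
		prio--;
	return prio;
}
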
-/* Do not skip compaction more than 64 times */
-#define COMPACT_MAX_DEFER_SHIFT 6
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* When adding new states, please adjust include/trace/events/compaction.h */
+enum compact_result {
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NOT_SUITABLE_ZONE,
+ /*
+ * compaction didn't start as it was not possible or direct reclaim
+ * was more suitable
+ */
+ COMPACT_SKIPPED,
+ /* compaction didn't start as it was deferred due to past failures */
+ COMPACT_DEFERRED,
+
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NO_SUITABLE_PAGE,
+ /* compaction should continue to another pageblock */
+ COMPACT_CONTINUE,
+
+ /*
+	 * The full zone was scanned but compaction wasn't successful in
+	 * compacting suitable pages.
+ */
+ COMPACT_COMPLETE,
+ /*
+	 * direct compaction has scanned part of the zone but wasn't
+	 * successful in compacting suitable pages.
+ */
+ COMPACT_PARTIAL_SKIPPED,
+
+	/* compaction terminated prematurely due to lock contention */
+ COMPACT_CONTENDED,
+
+ /*
+ * direct compaction terminated after concluding that the allocation
+ * should now succeed
+ */
+ COMPACT_SUCCESS,
+};
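
/*
 * Editor's sketch (not part of the diff): one plausible way a caller could
 * act on a compact_result. The retry policy below is an illustrative
 * assumption, not the allocator's exact logic.
 */
static inline bool compaction_worth_retrying(enum compact_result result)
{
	switch (result) {
	case COMPACT_SUCCESS:		/* the allocation should now succeed */
	case COMPACT_DEFERRED:		/* recent failures, back off for now */
		return false;
	case COMPACT_SKIPPED:		/* reclaim first, then try again */
	case COMPACT_CONTENDED:		/* transient lock contention */
	case COMPACT_PARTIAL_SKIPPED:	/* only part of the zone was scanned */
		return true;
	default:
		return false;
	}
}
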
+
+struct alloc_context; /* in mm/internal.h */
/*
- * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
- * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ * Number of free order-0 pages that should be available above the given
+ * watermark to make sure compaction has a reasonable chance of not running
+ * out of free pages that it needs to isolate as migration targets.
*/
-static inline void defer_compaction(struct zone *zone, int order)
+static inline unsigned long compact_gap(unsigned int order)
{
- zone->compact_considered = 0;
- zone->compact_defer_shift++;
-
- if (order < zone->compact_order_failed)
- zone->compact_order_failed = order;
-
- if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
- zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+ /*
+ * Although all the isolations for migration are temporary, compaction
+ * free scanner may have up to 1 << order pages on its list and then
+ * try to split an (order - 1) free page. At that point, a gap of
+ * 1 << order might not be enough, so it's safer to require twice that
+ * amount. Note that the number of pages on the list is also
+ * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
+ * that the migrate scanner can have isolated on migrate list, and free
+ * scanner is only invoked when the number of isolated free pages is
+	 * lower than that. But it's not worth complicating the formula here
+ * as a bigger gap for higher orders than strictly necessary can also
+ * improve chances of compaction success.
+ */
+ return 2UL << order;
}
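
/*
 * Editor's worked example (not part of the diff): for order = 9, e.g. a
 * 2MB THP with 4KB base pages, compact_gap(9) = 2UL << 9 = 1024 order-0
 * pages above the watermark. A suitability check is then expected to look
 * roughly like this sketch; free_pages and watermark are hypothetical
 * inputs, not fields defined in this header.
 */
static inline bool gap_available(unsigned long free_pages,
				 unsigned long watermark, unsigned int order)
{
	return free_pages >= watermark + compact_gap(order);
}
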
-/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone, int order)
+static inline int current_is_kcompactd(void)
{
- unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+ return current->flags & PF_KCOMPACTD;
+}
- if (order < zone->compact_order_failed)
- return false;
+#ifdef CONFIG_COMPACTION
- /* Avoid possible overflow */
- if (++zone->compact_considered > defer_limit)
- zone->compact_considered = defer_limit;
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
+ unsigned int order, unsigned int alloc_flags,
+ const struct alloc_context *ac, enum compact_priority prio,
+ struct page **page);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
+extern bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark, int highest_zoneidx);
- return zone->compact_considered < defer_limit;
-}
+extern void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success);
-/* Returns true if restarting compaction after many failures */
-static inline bool compaction_restarting(struct zone *zone, int order)
-{
- if (order < zone->compact_order_failed)
- return false;
+bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
+ int alloc_flags);
- return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
- zone->compact_considered >= 1UL << zone->compact_defer_shift;
-}
+extern void __meminit kcompactd_run(int nid);
+extern void __meminit kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
-static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync, bool *contended)
-{
- return COMPACT_CONTINUE;
-}
-
-static inline void compact_pgdat(pg_data_t *pgdat, int order)
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline void reset_isolation_suitable(pg_data_t *pgdat)
+static inline bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark,
+ int highest_zoneidx)
{
+ return false;
}
-static inline unsigned long compaction_suitable(struct zone *zone, int order)
+static inline void kcompactd_run(int nid)
{
- return COMPACT_SKIPPED;
}
-
-static inline void defer_compaction(struct zone *zone, int order)
+static inline void kcompactd_stop(int nid)
{
}
-static inline bool compaction_deferred(struct zone *zone, int order)
+static inline void wakeup_kcompactd(pg_data_t *pgdat,
+ int order, int highest_zoneidx)
{
- return true;
}
#endif /* CONFIG_COMPACTION */
+struct node;
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);