Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/sections.h        14
-rw-r--r--  include/linux/backing-dev-defs.h       3
-rw-r--r--  include/linux/backing-dev.h            1
-rw-r--r--  include/linux/cma.h                    1
-rw-r--r--  include/linux/compiler-gcc.h           8
-rw-r--r--  include/linux/compiler_attributes.h   10
-rw-r--r--  include/linux/compiler_types.h        12
-rw-r--r--  include/linux/cpuset.h                17
-rw-r--r--  include/linux/damon.h                236
-rw-r--r--  include/linux/fs.h                     1
-rw-r--r--  include/linux/gfp.h                    8
-rw-r--r--  include/linux/highmem.h               28
-rw-r--r--  include/linux/hugetlb.h               36
-rw-r--r--  include/linux/io-mapping.h             6
-rw-r--r--  include/linux/kasan.h                  8
-rw-r--r--  include/linux/kernel.h                 1
-rw-r--r--  include/linux/kfence.h                21
-rw-r--r--  include/linux/memblock.h              48
-rw-r--r--  include/linux/memcontrol.h             9
-rw-r--r--  include/linux/memory.h                26
-rw-r--r--  include/linux/memory_hotplug.h         3
-rw-r--r--  include/linux/mempolicy.h              5
-rw-r--r--  include/linux/migrate.h               23
-rw-r--r--  include/linux/migrate_mode.h          13
-rw-r--r--  include/linux/mm.h                    57
-rw-r--r--  include/linux/mm_types.h               2
-rw-r--r--  include/linux/mmzone.h                41
-rw-r--r--  include/linux/node.h                   4
-rw-r--r--  include/linux/page-flags.h             2
-rw-r--r--  include/linux/percpu.h                 6
-rw-r--r--  include/linux/slab.h                 135
-rw-r--r--  include/linux/slub_def.h              13
-rw-r--r--  include/linux/stackdepot.h             8
-rw-r--r--  include/linux/stacktrace.h             1
-rw-r--r--  include/linux/swap.h                   1
-rw-r--r--  include/linux/vmalloc.h               24
-rw-r--r--  include/trace/events/mmap_lock.h      48
-rw-r--r--  include/trace/events/vmscan.h         38
-rw-r--r--  include/trace/events/writeback.h       7
39 files changed, 623 insertions, 302 deletions
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d16302d3eb59..596ab2092289 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -80,20 +80,6 @@ static inline int arch_is_kernel_data(unsigned long addr)
}
#endif
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
-{
- return 0;
-}
-#endif
-
/**
* memory_contains - checks if an object is contained within a memory region
* @begin: virtual address of the beginning of the memory region
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 33207004cfde..993c5628a726 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -103,6 +103,9 @@ struct wb_completion {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 9c14f0a8dbe5..483979c1b9f4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -141,7 +141,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
}
long congestion_wait(int sync, long timeout);
-long wait_iff_congested(int sync, long timeout);
static inline bool mapping_can_writeback(struct address_space *mapping)
{
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 53fd8c3cdbd0..bd801023504b 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
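The new cma_pages_valid() lets a caller check whether a page range really belongs to a given CMA area before trying to release it there. A minimal sketch of the intended pattern, assuming a hypothetical caller that may hold either CMA or regular pages (my_cma_free() is illustrative, not part of this patch):

/*
 * Only hand the range back to CMA when it actually came from the area;
 * return false so the caller can free it through its normal path instead.
 */
static bool my_cma_free(struct cma *cma, struct page *pages,
                        unsigned long count)
{
        if (!cma || !cma_pages_valid(cma, pages, count))
                return false;

        return cma_release(cma, pages, count);
}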
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7bbd8df02532..ccbbd31b3aae 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -150,3 +150,11 @@
#else
#define __diag_GCC_8(s)
#endif
+
+/*
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) do not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index e6ec63403965..3de06a8fae73 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,15 @@
#define __aligned_largest __attribute__((__aligned__))
/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
@@ -153,6 +162,7 @@
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
*/
#define __malloc __attribute__((__malloc__))
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 05ceb2e92b0e..3f198879c292 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -250,6 +250,18 @@ struct ftrace_likely_data {
# define __cficanonical
#endif
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute. Do this as part of the
+ * __alloc_size macro to avoid redundant attributes and to avoid missing a
+ * __malloc marking.
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+#else
+# define __alloc_size(x, ...) __malloc
+#endif
+
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
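Taken together, the three compiler header hunks let allocator prototypes tell the compiler which parameter carries the allocation size (and imply __malloc), so out-of-bounds accesses on such allocations can be diagnosed on GCC >= 9.1 and clang. A hedged illustration with a made-up allocator; only the attribute usage is the point here:

/* Hypothetical allocator: parameter 1 is the allocation size in bytes. */
void *my_alloc(size_t size, gfp_t flags) __alloc_size(1);

/*
 * The attribute feeds __builtin_object_size() and array-bounds style
 * diagnostics, so an access like the one below can now be flagged at
 * build time or by fortified string helpers:
 *
 *      char *p = my_alloc(8, GFP_KERNEL);
 *      p[16] = 0;
 */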
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d2b9c41c8edf..d58e0476ee8e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -34,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -51,6 +53,19 @@ static inline void cpuset_dec(void)
static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general, e.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
+}
+
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
@@ -167,6 +182,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
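The page-allocator side that this key guards is expected to look roughly like the sketch below: bail out early when the current cpuset only allows movable nodes and the request is cpuset-constrained, instead of looping in reclaim for an allocation that can never succeed. The function name and call site here are assumptions for illustration:

static bool cpuset_alloc_forbidden(gfp_t gfp_mask, struct zonelist *zonelist,
                                   enum zone_type highest_zoneidx)
{
        struct zoneref *z;

        if (!cpusets_insane_config() || !(gfp_mask & __GFP_HARDWALL))
                return false;

        /* Is there any zone the allowed (movable-only) nodes can serve? */
        z = first_zones_zonelist(zonelist, highest_zoneidx,
                                 &cpuset_current_mems_allowed);
        return !z->zone;
}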
diff --git a/include/linux/damon.h b/include/linux/damon.h
index d68b67b8d458..b4d4be3cc987 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -14,6 +14,8 @@
/* Minimal region size. Every damon_region is aligned by this. */
#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
/**
* struct damon_addr_range - Represents an address region of [@start, @end).
@@ -31,12 +33,22 @@ struct damon_addr_range {
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
* @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
*/
struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
};
/**
@@ -59,16 +71,180 @@ struct damon_target {
struct list_head list;
};
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+};
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz: Maximum bytes of memory to which the action can be applied.
+ * @reset_interval: Charge reset interval in milliseconds.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * Internally, the time quota is transformed to a size quota using estimated
+ * throughput of the scheme's action. DAMON then compares it against &sz and
+ * uses smaller one as the effective quota.
+ *
+ * For selecting regions within the quota, DAMON prioritizes current scheme's
+ * target memory regions using the &struct damon_primitive->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring primitives are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long ms;
+ unsigned long sz;
+ unsigned long reset_interval;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ unsigned long esz; /* Effective size quota in bytes */
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned long histogram[DAMOS_MAX_SCORE + 1];
+ unsigned int min_score;
+};
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON does monitoring and applying the action of the scheme to
+ * appropriate memory regions. Else, DAMON checks &metric of the system for at
+ * least every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is inactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is inactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @stat_count: Total number of regions that this scheme is applied.
+ * @stat_sz: Total size of regions that this scheme is applied.
+ * @list: List head for siblings.
+ *
+ * For each aggregation interval, DAMON finds regions which fit in the
+ * condition (&min_sz_region, &max_sz_region, &min_nr_accesses,
+ * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to
+ * those. To avoid consuming too much CPU time or IO resources for the
+ * &action, &quota is used.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that are registered to a
+ * &struct damon_ctx are inactive, DAMON stops monitoring as well, and just
+ * repeatedly checks the watermarks.
+ *
+ * After applying the &action to each region, &stat_count and &stat_sz are
+ * updated to reflect the number of regions and the total size of the regions
+ * that the &action has been applied to.
+ */
+struct damos {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+ enum damos_action action;
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ unsigned long stat_count;
+ unsigned long stat_sz;
+ struct list_head list;
+};
+
struct damon_ctx;
/**
- * struct damon_primitive Monitoring primitives for given use cases.
+ * struct damon_primitive - Monitoring primitives for given use cases.
*
* @init: Initialize primitive-internal data structures.
* @update: Update primitive-internal data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
* @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
* @cleanup: Clean up the context.
*
@@ -94,6 +270,11 @@ struct damon_ctx;
* of its update. The value will be used for regions adjustment threshold.
* @reset_aggregated should reset the access monitoring results that aggregated
* by @check_accesses.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for user provided
+ * DAMON-based operation scheme is found. It should apply the scheme's action
+ * to the region. This is not used for &DAMON_ARBITRARY_TARGET case.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
@@ -104,12 +285,17 @@ struct damon_primitive {
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
void (*reset_aggregated)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ int (*apply_scheme)(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
bool (*target_valid)(void *target);
void (*cleanup)(struct damon_ctx *context);
};
-/*
- * struct damon_callback Monitoring events notification callbacks.
+/**
+ * struct damon_callback - Monitoring events notification callbacks.
*
* @before_start: Called before starting the monitoring.
* @after_sampling: Called after each sampling.
@@ -136,7 +322,7 @@ struct damon_callback {
int (*before_start)(struct damon_ctx *context);
int (*after_sampling)(struct damon_ctx *context);
int (*after_aggregation)(struct damon_ctx *context);
- int (*before_terminate)(struct damon_ctx *context);
+ void (*before_terminate)(struct damon_ctx *context);
};
/**
@@ -182,6 +368,7 @@ struct damon_callback {
* @min_nr_regions: The minimum number of adaptive monitoring regions.
* @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
*/
struct damon_ctx {
unsigned long sample_interval;
@@ -194,7 +381,6 @@ struct damon_ctx {
/* public: */
struct task_struct *kdamond;
- bool kdamond_stop;
struct mutex kdamond_lock;
struct damon_primitive primitive;
@@ -203,6 +389,7 @@ struct damon_ctx {
unsigned long min_nr_regions;
unsigned long max_nr_regions;
struct list_head adaptive_targets;
+ struct list_head schemes;
};
#define damon_next_region(r) \
@@ -211,6 +398,9 @@ struct damon_ctx {
#define damon_prev_region(r) \
(container_of(r->list.prev, struct damon_region, list))
+#define damon_last_region(t) \
+ (list_last_entry(&t->regions_list, struct damon_region, list))
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
@@ -223,6 +413,12 @@ struct damon_ctx {
#define damon_for_each_target_safe(t, next, ctx) \
list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
@@ -232,8 +428,18 @@ inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+struct damos *damon_new_scheme(
+ unsigned long min_sz_region, unsigned long max_sz_region,
+ unsigned int min_nr_accesses, unsigned int max_nr_accesses,
+ unsigned int min_age_region, unsigned int max_age_region,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+
struct damon_target *damon_new_target(unsigned long id);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
void damon_destroy_target(struct damon_target *t);
unsigned int damon_nr_regions(struct damon_target *t);
@@ -245,6 +451,8 @@ int damon_set_targets(struct damon_ctx *ctx,
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg);
+int damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
int damon_nr_running_ctxs(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
@@ -261,8 +469,26 @@ void damon_va_prepare_access_checks(struct damon_ctx *ctx);
unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
bool damon_va_target_valid(void *t);
void damon_va_cleanup(struct damon_ctx *ctx);
+int damon_va_apply_scheme(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
void damon_va_set_primitives(struct damon_ctx *ctx);
#endif /* CONFIG_DAMON_VADDR */
+#ifdef CONFIG_DAMON_PADDR
+
+/* Monitoring primitives for the physical memory address space */
+void damon_pa_prepare_access_checks(struct damon_ctx *ctx);
+unsigned int damon_pa_check_accesses(struct damon_ctx *ctx);
+bool damon_pa_target_valid(void *t);
+int damon_pa_apply_scheme(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+int damon_pa_scheme_score(struct damon_ctx *context, struct damon_target *t,
+ struct damon_region *r, struct damos *scheme);
+void damon_pa_set_primitives(struct damon_ctx *ctx);
+
+#endif /* CONFIG_DAMON_PADDR */
+
#endif /* _DAMON_H */
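A hedged sketch of how a caller (for example a module wiring up DAMON-based reclaim) could build and register one of the new schemes; the thresholds and quota numbers below are arbitrary:

/* Page out regions of at least one page that stayed idle for 10 aggregation intervals. */
static int my_add_pageout_scheme(struct damon_ctx *ctx)
{
        struct damos_quota quota = {
                .ms = 10,                       /* spend at most 10ms ...   */
                .sz = 0,                        /* ... with no size limit   */
                .reset_interval = 1000,         /* per 1000ms               */
                .weight_sz = 0,
                .weight_nr_accesses = 0,
                .weight_age = 1,                /* prefer colder regions    */
        };
        struct damos_watermarks wmarks = {
                .metric = DAMOS_WMARK_NONE,     /* always active            */
        };
        struct damos *scheme;

        scheme = damon_new_scheme(PAGE_SIZE, ULONG_MAX, /* region size range */
                                  0, 0,                 /* nr_accesses range */
                                  10, UINT_MAX,         /* age range         */
                                  DAMOS_PAGEOUT, &quota, &wmarks);
        if (!scheme)
                return -ENOMEM;

        damon_add_scheme(ctx, scheme);
        return 0;
}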
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f3cfca5edc9a..4137a9bfae7a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1440,6 +1440,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
/* Possible states of 'frozen' field */
enum {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3745efd21cf6..b976c4177299 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -531,6 +531,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct list_head *page_list,
struct page **page_array);
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+
/* Bulk allocate order-0 pages */
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
@@ -618,9 +622,9 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
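alloc_pages_bulk_array_mempolicy() is the mempolicy-aware variant of the bulk allocator: it fills page_array with order-0 pages placed according to the calling task's NUMA policy and returns how many entries it populated, which may be fewer than requested. A hedged usage sketch:

static struct page **my_bulk_alloc(unsigned long nr_pages, gfp_t gfp)
{
        struct page **pages;
        unsigned long got;

        pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        got = alloc_pages_bulk_array_mempolicy(gfp, nr_pages, pages);
        if (got < nr_pages) {           /* partial result: clean up and give up */
                while (got)
                        __free_page(pages[--got]);
                kvfree(pages);
                return NULL;
        }
        return pages;
}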
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 27cdd715c5f9..25aff0f2ed0b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -180,9 +180,9 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
#endif
@@ -214,9 +214,9 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
static inline void clear_highpage(struct page *page)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
clear_page(kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
@@ -239,7 +239,7 @@ static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
unsigned int i;
BUG_ON(end1 > page_size(page) || end2 > page_size(page));
@@ -250,7 +250,7 @@ static inline void zero_user_segments(struct page *page,
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
@@ -275,11 +275,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
@@ -290,11 +290,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
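These conversions follow the general kmap_atomic() -> kmap_local_page() migration: the local variant keeps preemption and page faults enabled, is strictly thread-local, and only requires that mappings be released in reverse (stack) order, as the copy helpers above do. The same pattern in caller code, as a hedged example:

/* Copy a buffer into a possibly-highmem page. */
static void my_copy_to_page(struct page *page, size_t offset,
                            const void *src, size_t len)
{
        char *dst = kmap_local_page(page);      /* preemptible, per-thread */

        memcpy(dst + offset, src, len);
        flush_dcache_page(page);
        kunmap_local(dst);                      /* unmap in LIFO order */
}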
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1faebe1cd0ed..44c2ab0dfa59 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -124,6 +124,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
void hugepage_put_subpool(struct hugepage_subpool *spool);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
@@ -132,6 +133,10 @@ int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
@@ -143,9 +148,6 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
@@ -218,6 +220,10 @@ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
+{
+}
+
static inline unsigned long hugetlb_total_pages(void)
{
return 0;
@@ -265,6 +271,16 @@ static inline int copy_hugetlb_page_range(struct mm_struct *dst,
return 0;
}
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
@@ -385,13 +401,6 @@ static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
BUG();
}
-static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
-{
- BUG();
-}
-
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
@@ -596,6 +605,7 @@ struct hstate {
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -605,6 +615,7 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
@@ -637,8 +648,9 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct page *page);
/* arch callback */
-int __init __alloc_bootmem_huge_page(struct hstate *h);
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
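Passing a node id into the bootmem arch callback is what allows boot-time huge page reservations to be requested per NUMA node; hugetlb_node_alloc_supported() reports whether the architecture's allocator actually honours the nid. A hedged sketch of the kind of loop that consumes these hooks (simplified, not the literal mm/hugetlb.c code):

static void __init my_alloc_boot_huge_pages_node(struct hstate *h, int nid)
{
        unsigned long i;

        if (!hugetlb_node_alloc_supported())
                return;         /* fall back to node-interleaved allocation */

        for (i = 0; i < h->max_huge_pages_node[nid]; i++) {
                if (!alloc_bootmem_huge_page(h, nid))
                        break;  /* this node is out of suitable memory */
        }
}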
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e9743cfd8585..66a774d2710e 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -132,13 +132,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->base = base;
iomap->size = size;
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index de5f5913374d..d8783b682669 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -375,12 +375,14 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
+void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
+static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
@@ -439,6 +441,8 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+
#else /* CONFIG_KASAN_VMALLOC */
static inline int kasan_populate_vmalloc(unsigned long start,
@@ -456,6 +460,10 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long free_region_start,
unsigned long free_region_end) {}
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8696e4a45aa..e5359b09de1d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -247,6 +247,7 @@ extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
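This new state is what replaces the arch_is_kernel_initmem_freed() helper removed from asm-generic/sections.h above: rather than asking the architecture whether a given address sits in already-freed initmem, callers can consult the global boot progress. A hedged sketch of such a check:

/*
 * Sketch: an address inside [__init_begin, __init_end) may only be treated
 * as static init text/data before the kernel starts giving initmem back to
 * the page allocator.
 */
static bool my_addr_is_initmem(unsigned long addr)
{
        if (system_state >= SYSTEM_FREEING_INITMEM)
                return false;

        return addr >= (unsigned long)__init_begin &&
               addr < (unsigned long)__init_end;
}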
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 3fe6dd8a18c1..4b5e3679a72c 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -14,6 +14,9 @@
#ifdef CONFIG_KFENCE
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
@@ -22,13 +25,8 @@
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
-#else
-#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
-#endif
/**
* is_kfence_address() - check if an address belongs to KFENCE pool
@@ -116,13 +114,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
*/
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
-#ifdef CONFIG_KFENCE_STATIC_KEYS
- if (static_branch_unlikely(&kfence_allocation_key))
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
#else
- if (unlikely(!atomic_read(&kfence_allocation_gate)))
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
#endif
- return __kfence_alloc(s, size, flags);
- return NULL;
+ if (likely(atomic_read(&kfence_allocation_gate)))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
}
/**
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 34de69b3b8ba..7df557b16c1e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -28,17 +28,26 @@ extern unsigned long long max_possible_pfn;
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
- * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * commandline, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
* @MEMBLOCK_MIRROR: mirrored region
* @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
* reserved in the memory map; refer to memblock_mark_nomap() description
* for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
};
/**
@@ -100,10 +109,11 @@ static inline void memblock_discard(void) {}
#endif
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
@@ -118,7 +128,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
void memblock_free_all(void);
-void memblock_free_ptr(void *ptr, size_t size);
+void memblock_free(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
@@ -133,7 +143,7 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
@@ -208,7 +218,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -219,7 +230,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
/**
* for_each_reserved_mem_range - iterate over all reserved memblock areas
@@ -249,6 +261,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -441,23 +458,6 @@ static inline void *memblock_alloc_node(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
-static inline void memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
/*
* Set the allocation direction to bottom-up or top-down.
*/
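Note the rename pair in this file: the old memblock_free(), which took a physical address, becomes memblock_phys_free(), while the old memblock_free_ptr(), which took a virtual pointer, takes over the memblock_free() name (and the memblock_free_early*() wrappers go away). Illustrative fragments only, to show which call maps to which:

/* Before this series: */
memblock_free(__pa(ptr), size);                 /* physical address */
memblock_free_ptr(ptr, size);                   /* virtual pointer  */

/* After this series: */
memblock_phys_free(__pa(ptr), size);            /* physical address */
memblock_free(ptr, size);                       /* virtual pointer  */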
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e34bf0cbdf55..0c5c403f4be6 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -180,12 +180,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
-};
-
#if defined(CONFIG_SMP)
struct memcg_padding {
char x[0];
@@ -318,7 +312,6 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
- enum memcg_kmem_state kmem_state;
struct obj_cgroup __rcu *objcg;
struct list_head objcg_list; /* list of inherited objcgs */
#endif
@@ -1667,7 +1660,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
do {
- if (time_before(jiffies, memcg->socket_pressure))
+ if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 182c606adb06..88eb587b5143 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -96,7 +96,6 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
- int status_change_nid_high;
int status_change_nid;
};
@@ -110,7 +109,7 @@ struct mem_section;
#define SLAB_CALLBACK_PRI 1
#define IPC_CALLBACK_PRI 10
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
@@ -126,7 +125,14 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-#else
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
@@ -140,7 +146,6 @@ typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
extern int memory_group_register_static(int nid, unsigned long max_pages);
extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
@@ -149,9 +154,6 @@ struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-#ifdef CONFIG_MEMORY_HOTPLUG
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
@@ -159,15 +161,7 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
})
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
-{
- return 0;
-}
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e5a867c950b2..be48e003a518 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -98,9 +98,6 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct page *page,
struct memory_group *group,
long nr_pages);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4091692bed8c..3c7595e81150 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -184,8 +183,6 @@ extern bool vma_migratable(struct vm_area_struct *vma);
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
-extern bool numa_demotion_enabled;
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return (pol->mode == MPOL_PREFERRED_MANY);
@@ -301,8 +298,6 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
return NULL;
}
-#define numa_demotion_enabled false
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return false;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 0d2aeb9b0f66..eeb818c4fc78 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -19,24 +19,7 @@ struct migration_target_control;
*/
#define MIGRATEPAGE_SUCCESS 0
-/*
- * Keep sync with:
- * - macro MIGRATE_REASON in include/trace/events/migrate.h
- * - migrate_reason_names[MR_TYPES] in mm/debug.c
- */
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CONTIG_RANGE,
- MR_LONGTERM_PIN,
- MR_DEMOTION,
- MR_TYPES
-};
-
+/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
@@ -61,6 +44,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int extra_count);
+
+extern bool numa_demotion_enabled;
#else
static inline void putback_movable_pages(struct list_head *l) {}
@@ -86,6 +71,8 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
+
+#define numa_demotion_enabled false
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..f37cc03f9369 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -19,4 +19,17 @@ enum migrate_mode {
MIGRATE_SYNC_NO_COPY,
};
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_TYPES
+};
+
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a62b91e769c8..a7e4a9e7d807 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -794,40 +794,6 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return kvmalloc(bytes, flags);
-}
-
-static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
- return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
-
-extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
- gfp_t flags);
-extern void kvfree(const void *addr);
-extern void kvfree_sensitive(const void *addr, size_t len);
-
static inline int head_compound_mapcount(struct page *head)
{
return atomic_read(compound_mapcount_ptr(head)) + 1;
@@ -904,6 +870,8 @@ void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
+unsigned long nr_free_buffer_pages(void);
+
/*
* Compound pages have a destructor function. Provide a
* prototype for that function and accessor functions.
@@ -1861,12 +1829,24 @@ extern void user_shm_unlock(size_t, struct ucounts *);
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
+ struct address_space *zap_mapping; /* Check page->mapping if set */
struct page *single_page; /* Locked page to be unmapped */
};
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if we should skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+ if (!details || !page)
+ return false;
+
+ return details->zap_mapping &&
+ (details->zap_mapping != page_rmapping(page));
+}
+
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -2576,7 +2556,7 @@ static inline unsigned long get_num_physpages(void)
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
@@ -2604,6 +2584,7 @@ extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
+extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
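A hedged sketch of how the new helper slots into a pte-zapping loop; the surrounding code is schematic rather than the real mm/memory.c logic. When details->zap_mapping is set, only pages that still belong to that address_space are unmapped, and anonymous/COWed private pages are left in place:

static bool my_should_zap_page(struct zap_details *details,
                               struct vm_area_struct *vma,
                               unsigned long addr, pte_t ptent)
{
        struct page *page = vm_normal_page(vma, addr, ptent);

        /* true = go ahead and zap; false = the shared-vs-private check says skip */
        return !zap_skip_check_mapping(details, page);
}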
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f7326c8704bb..bb8c6f5f19bc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -114,10 +114,8 @@ struct page {
struct page *next;
#ifdef CONFIG_64BIT
int pages; /* Nr of pages left */
- int pobjects; /* Approximate count */
#else
short int pages;
- short int pobjects;
#endif
};
};
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6a1d79d84675..58e744b78c2c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -199,6 +199,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
@@ -272,6 +273,13 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
@@ -841,6 +849,13 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
+
+ /* workqueues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
+ unsigned long nr_reclaim_start; /* nr pages written while throttled
+ * when throttling started. */
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
int kswapd_order;
@@ -1220,6 +1235,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
+{
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+ * We can choose an arbitrary node from the nodemask to get a
+ * zonelist as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+ return (!z->zone) ? true : false;
+}
+
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
@@ -1481,7 +1518,7 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- ms = __nr_to_section(pfn_to_section_nr(pfn));
+ ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;
/*
@@ -1496,7 +1533,7 @@ static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
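movable_only_nodes() is the other half of the cpusets_insane_config() machinery introduced in cpuset.h above: the cpuset side is expected to flip the static key once a mems_allowed update leaves a cpuset with movable-only nodes. A hedged sketch of that side (the function it would live in is an assumption):

static void my_check_insane_mems_config(nodemask_t *nodes)
{
        if (!cpusets_insane_config() && movable_only_nodes(nodes)) {
                static_branch_enable(&cpusets_insane_config_key);
                pr_info("Unsupported (movable nodes only) cpuset configuration detected\n");
        }
}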
diff --git a/include/linux/node.h b/include/linux/node.h
index 8e5a29897936..bb21fd631b16 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -85,7 +85,7 @@ struct node {
struct device dev;
struct list_head access_list;
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
struct work_struct node_work;
#endif
#ifdef CONFIG_HMEM_REPORTING
@@ -98,7 +98,7 @@ struct memory_block;
extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
void link_mem_sections(int nid, unsigned long start_pfn,
unsigned long end_pfn,
enum meminit_context context);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 981341a3c3c4..52ec4b5e5615 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -245,7 +245,7 @@ static __always_inline int PageCompound(struct page *page)
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return page->flags == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..98a9371133f8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -123,7 +123,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,8 +131,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 083f3ce550bc..181045148b06 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -142,8 +142,6 @@ struct mem_cgroup;
void __init kmem_cache_init(void);
bool slab_is_available(void);
-extern bool usercopy_fallback;
-
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
@@ -152,8 +150,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -181,11 +179,11 @@ int kmem_cache_shrink(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
@@ -425,9 +423,9 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -436,8 +434,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
/*
* Caller must not use kfree_bulk() on memory not originally allocated
@@ -449,10 +447,12 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+ __alloc_size(1);
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
+ __malloc;
#else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc(size, flags);
}
@@ -464,25 +464,24 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_slab_alignment __alloc_size(3);
#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_slab_alignment
+ __alloc_size(4);
#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */
#else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
+static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
{
void *ret = kmem_cache_alloc(s, flags);
@@ -490,10 +489,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
return ret;
}
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -502,19 +499,21 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
+ __alloc_size(1);
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __alloc_size(1);
#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
+ unsigned int order)
{
return kmalloc_order(size, flags, order);
}
#endif
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
return kmalloc_order_trace(size, flags, order);
@@ -574,7 +573,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
@@ -596,7 +595,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
@@ -620,7 +619,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -638,8 +637,10 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-static __must_check inline void *
-krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
{
size_t bytes;
@@ -655,7 +656,7 @@ krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
@@ -668,12 +669,13 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
+ __alloc_size(1);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
- int node)
+static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
{
size_t bytes;
@@ -684,14 +686,15 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
@@ -716,7 +719,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
}
@@ -727,11 +730,45 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
+static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return kvmalloc(bytes, flags);
+}
+
+static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
+extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+ __alloc_size(3);
+extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
+
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
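
The slab.h hunks above attach __alloc_size() to every kmalloc-family prototype: the index names the size parameter (or both factors of an n * size pair, as in kmalloc_array() and kcalloc()), so the compiler can derive the object size behind the returned pointer; the kvmalloc()/kvfree() helpers pick up the same annotation as they move into this header. A minimal, standalone sketch of what the annotation buys, using the raw GCC/Clang attribute that the kernel macro wraps (demo_alloc() is an illustrative name, not a kernel function):

#include <stdio.h>
#include <stdlib.h>

/*
 * With alloc_size the compiler knows the size of the object behind the
 * returned pointer, so __builtin_dynamic_object_size() (and FORTIFY-style
 * bounds checks built on it) can see through the allocator call.
 */
__attribute__((alloc_size(1), malloc))
static void *demo_alloc(size_t size)
{
	return malloc(size);
}

int main(void)
{
	char *buf = demo_alloc(32);

	/*
	 * Prints 32 when built with optimisation on a compiler that supports
	 * dynamic object sizes (recent GCC/Clang); (size_t)-1 means unknown.
	 */
	printf("known object size: %zu\n",
	       __builtin_dynamic_object_size(buf, 0));

	free(buf);
	return 0;
}

Without the attribute the same query would report the size as unknown, which is the gap these annotations close for the kernel allocators.
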
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 85499f0586b0..0fa751b946fa 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,6 +99,8 @@ struct kmem_cache {
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
+ /* Number of per cpu partial pages to keep around */
+ unsigned int cpu_partial_pages;
#endif
struct kmem_cache_order_objects oo;
@@ -141,17 +143,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_cpu_partial(s) ((s)->cpu_partial)
-#define slub_set_cpu_partial(s, n) \
-({ \
- slub_cpu_partial(s) = (n); \
-})
-#else
-#define slub_cpu_partial(s) (0)
-#define slub_set_cpu_partial(s, n)
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 6bb4bc1a5f54..d29860966bc9 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,16 +11,20 @@
#ifndef _LINUX_STACKDEPOT_H
#define _LINUX_STACKDEPOT_H
+#include <linux/gfp.h>
+
typedef u32 depot_stack_handle_t;
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags, bool can_alloc);
+
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
-unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
-
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
#else
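
stackdepot gains __stack_depot_save(), which takes an explicit can_alloc flag so callers that cannot safely allocate a new depot pool page can still intern a trace that fits in the existing pools (the header also starts pulling in <linux/gfp.h> for the gfp_t in these prototypes, while filter_irq_stacks() moves to stacktrace.h below). A hedged kernel-style caller sketch; demo_record_stack() and DEMO_STACK_DEPTH are illustrative names:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define DEMO_STACK_DEPTH	16

static depot_stack_handle_t demo_record_stack(gfp_t gfp)
{
	unsigned long entries[DEMO_STACK_DEPTH];
	unsigned int nr;

	/* Capture the current stack, skipping this helper's own frame. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/*
	 * can_alloc == false: never allocate a new pool page here; a handle
	 * of 0 means the trace was new and no preallocated space was left.
	 */
	return __stack_depot_save(entries, nr, gfp, false);
}
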
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 9edecb494e9e..bef158815e83 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -21,6 +21,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr);
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cdf0957a88a4..d1ea44b31f19 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -341,7 +341,6 @@ void workingset_update_node(struct xa_node *node);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 671d402c3778..6e022cc712e6 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -22,7 +22,7 @@ struct notifier_block; /* in notifier.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
-#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
@@ -136,21 +136,21 @@ static inline void vmalloc_init(void)
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
-extern void *vmalloc(unsigned long size);
-extern void *vzalloc(unsigned long size);
-extern void *vmalloc_user(unsigned long size);
-extern void *vmalloc_node(unsigned long size, int node);
-extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_32(unsigned long size);
-extern void *vmalloc_32_user(unsigned long size);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
+extern void *vmalloc(unsigned long size) __alloc_size(1);
+extern void *vzalloc(unsigned long size) __alloc_size(1);
+extern void *vmalloc_user(unsigned long size) __alloc_size(1);
+extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
+extern void *vmalloc_32(unsigned long size) __alloc_size(1);
+extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
+ const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
- int node, const void *caller);
-void *vmalloc_no_huge(unsigned long size);
+ int node, const void *caller) __alloc_size(1);
+void *vmalloc_no_huge(unsigned long size) __alloc_size(1);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
index 0abff67b96f0..14db8044c1ff 100644
--- a/include/trace/events/mmap_lock.h
+++ b/include/trace/events/mmap_lock.h
@@ -13,7 +13,7 @@ struct mm_struct;
extern int trace_mmap_lock_reg(void);
extern void trace_mmap_lock_unreg(void);
-TRACE_EVENT_FN(mmap_lock_start_locking,
+DECLARE_EVENT_CLASS(mmap_lock,
TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
@@ -32,15 +32,23 @@ TRACE_EVENT_FN(mmap_lock_start_locking,
),
TP_printk(
- "mm=%p memcg_path=%s write=%s\n",
+ "mm=%p memcg_path=%s write=%s",
__entry->mm,
__get_str(memcg_path),
__entry->write ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
+ )
);
+#define DEFINE_MMAP_LOCK_EVENT(name) \
+ DEFINE_EVENT_FN(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
+ bool write), \
+ TP_ARGS(mm, memcg_path, write), \
+ trace_mmap_lock_reg, trace_mmap_lock_unreg)
+
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
+DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
+
TRACE_EVENT_FN(mmap_lock_acquire_returned,
TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
@@ -63,7 +71,7 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned,
),
TP_printk(
- "mm=%p memcg_path=%s write=%s success=%s\n",
+ "mm=%p memcg_path=%s write=%s success=%s",
__entry->mm,
__get_str(memcg_path),
__entry->write ? "true" : "false",
@@ -73,34 +81,6 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned,
trace_mmap_lock_reg, trace_mmap_lock_unreg
);
-TRACE_EVENT_FN(mmap_lock_released,
-
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
-
- TP_ARGS(mm, memcg_path, write),
-
- TP_STRUCT__entry(
- __field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
- __field(bool, write)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __assign_str(memcg_path, memcg_path);
- __entry->write = write;
- ),
-
- TP_printk(
- "mm=%p memcg_path=%s write=%s\n",
- __entry->mm,
- __get_str(memcg_path),
- __entry->write ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
-);
-
#endif /* _TRACE_MMAP_LOCK_H */
/* This part must be outside protection */
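
The two identical mmap_lock tracepoints above are folded into one DECLARE_EVENT_CLASS() plus the DEFINE_MMAP_LOCK_EVENT() wrapper, so the assign/print boilerplate is emitted once and shared by every event of the class. Any further event with the same mm/memcg_path/write signature would then be a one-liner inside this header; a sketch with a deliberately hypothetical event name:

/* Hypothetical only: not an event added by this patch. */
DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_unlocking);

/* Call sites would then use the generated helper:
 *	trace_mmap_lock_start_unlocking(mm, memcg_path, write);
 */
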
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 88faf2400ec2..f25a6149d3ba 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -27,6 +27,18 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
+#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
+#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
+#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+
+#define show_throttle_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
+ {_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
+ ) : "VMSCAN_THROTTLE_NONE"
+
+
#define trace_reclaim_flags(file) ( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
@@ -454,6 +466,32 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_vmscan_throttled,
+
+ TP_PROTO(int nid, int usec_timeout, int usec_delayed, int reason),
+
+ TP_ARGS(nid, usec_timeout, usec_delayed, reason),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, usec_timeout)
+ __field(int, usec_delayed)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ __entry->reason = 1U << reason;
+ ),
+
+	TP_printk("nid=%d usec_timeout=%d usec_delayed=%d reason=%s",
+ __entry->nid,
+ __entry->usec_timeout,
+ __entry->usec_delayed,
+ show_throttle_flags(__entry->reason))
+);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
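
The new mm_vmscan_throttled event stores the throttle reason as 1U << reason so that show_throttle_flags() can hand a bitmask to __print_flags(); callers pass the plain VMSCAN_THROTTLE_* enumerator. A hedged call-site sketch (the wrapper and variable names are illustrative; the real caller lives in mm/vmscan.c, outside this header diff):

#include <linux/jiffies.h>
#include <linux/mmzone.h>
#include <trace/events/vmscan.h>

/* Illustrative wrapper, not a function from mm/vmscan.c. */
static void demo_report_throttle(pg_data_t *pgdat, unsigned long timeout,
				 unsigned long remaining, int reason)
{
	/* reason is e.g. VMSCAN_THROTTLE_WRITEBACK; timeout/remaining are
	 * jiffies values from the throttle sleep in this sketch. */
	trace_mm_vmscan_throttled(pgdat->node_id,
				  jiffies_to_usecs(timeout),
				  jiffies_to_usecs(timeout - remaining),
				  reason);
}
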
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 7dccb66474f7..a345b1e12daf 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -763,13 +763,6 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
TP_ARGS(usec_timeout, usec_delayed)
);
-DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,