Diffstat (limited to 'mm')
 mm/kfence/core.c    |  4
 mm/memory-failure.c | 13
 mm/memory.c         | 18
 mm/page_idle.c      |  2
 mm/slab.h           | 28
 mm/slub.c           | 80
 mm/vmscan.c         |  8
 7 files changed, 86 insertions(+), 67 deletions(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 102048821c22..0ed3be100963 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -605,8 +605,8 @@ static unsigned long kfence_init_pool(void)
 	pages = virt_to_page(__kfence_pool);
 
 	/*
-	 * Set up object pages: they must have PG_slab set, to avoid freeing
-	 * these as real pages.
+	 * Set up object pages: they must have PGTY_slab set to avoid freeing
+	 * them as real pages.
 	 *
 	 * We also want to avoid inserting kfence_free() in the kfree()
 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 225dddff091d..033bb5fe8515 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2507,19 +2507,6 @@ static void memory_failure_work_func(struct work_struct *work)
 	}
 }
 
-/*
- * Process memory_failure work queued on the specified CPU.
- * Used to avoid return-to-userspace racing with the memory_failure workqueue.
- */
-void memory_failure_queue_kick(int cpu)
-{
-	struct memory_failure_cpu *mf_cpu;
-
-	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
-	cancel_work_sync(&mf_cpu->work);
-	memory_failure_work_func(&mf_cpu->work);
-}
-
 static int __init memory_failure_init(void)
 {
 	struct memory_failure_cpu *mf_cpu;
diff --git a/mm/memory.c b/mm/memory.c
index b0cda5aab398..f57764b117f6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -125,6 +125,24 @@ int randomize_va_space __read_mostly = 2;
 #endif
 
+static const struct ctl_table mmu_sysctl_table[] = {
+	{
+		.procname	= "randomize_va_space",
+		.data		= &randomize_va_space,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+};
+
+static int __init init_mm_sysctl(void)
+{
+	register_sysctl_init("kernel", mmu_sysctl_table);
+	return 0;
+}
+
+subsys_initcall(init_mm_sysctl);
+
 #ifndef arch_wants_old_prefaulted_pte
 static inline bool arch_wants_old_prefaulted_pte(void)
 {
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 408aaf29a3ea..a82b340dc204 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -208,7 +208,7 @@ static const struct bin_attribute *const page_idle_bin_attrs[] = {
 };
 
 static const struct attribute_group page_idle_attr_group = {
-	.bin_attrs_new = page_idle_bin_attrs,
+	.bin_attrs = page_idle_bin_attrs,
 	.name = "page_idle",
 };
diff --git a/mm/slab.h b/mm/slab.h
index 05a21dc796e0..248b34c839b7 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -50,7 +50,7 @@ typedef union {
 
 /* Reuses the bits in struct page */
 struct slab {
-	unsigned long __page_flags;
+	unsigned long flags;
 
 	struct kmem_cache *slab_cache;
 	union {
@@ -99,7 +99,7 @@ struct slab {
 
 #define SLAB_MATCH(pg, sl) \
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
-SLAB_MATCH(flags, __page_flags);
+SLAB_MATCH(flags, flags);
 SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 #define slab_page(s) folio_page(slab_folio(s), 0)
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline bool slab_test_pfmemalloc(const struct slab *slab)
-{
-	return folio_test_active(slab_folio(slab));
-}
-
-static inline void slab_set_pfmemalloc(struct slab *slab)
-{
-	folio_set_active(slab_folio(slab));
-}
-
-static inline void slab_clear_pfmemalloc(struct slab *slab)
-{
-	folio_clear_active(slab_folio(slab));
-}
-
-static inline void __slab_clear_pfmemalloc(struct slab *slab)
-{
-	__folio_clear_active(slab_folio(slab));
-}
-
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
diff --git a/mm/slub.c b/mm/slub.c
index 31e11ef256f9..70327dc70ee5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -91,14 +91,14 @@
  *   The partially empty slabs cached on the CPU partial list are used
  *   for performance reasons, which speeds up the allocation process.
  *   These slabs are not frozen, but are also exempt from list management,
- *   by clearing the PG_workingset flag when moving out of the node
+ *   by clearing the SL_partial flag when moving out of the node
  *   partial list. Please see __slab_free() for more details.
  *
  * To sum up, the current scheme is:
- * - node partial slab: PG_Workingset && !frozen
- * - cpu partial slab:  !PG_Workingset && !frozen
- * - cpu slab:          !PG_Workingset && frozen
- * - full slab:         !PG_Workingset && !frozen
+ * - node partial slab: SL_partial && !frozen
+ * - cpu partial slab:  !SL_partial && !frozen
+ * - cpu slab:          !SL_partial && frozen
+ * - full slab:         !SL_partial && !frozen
 *
 *   list_lock
 *
@@ -183,6 +183,22 @@
 *   the fast path and disables lockless freelists.
 */
 
+/**
+ * enum slab_flags - How the slab flags bits are used.
+ * @SL_locked: Is locked with slab_lock()
+ * @SL_partial: On the per-node partial list
+ * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
+ *
+ * The slab flags share space with the page flags but some bits have
+ * different interpretations. The high bits are used for information
+ * like zone/node/section.
+ */
+enum slab_flags {
+	SL_locked = PG_locked,
+	SL_partial = PG_workingset,	/* Historical reasons for this bit */
+	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
+};
+
 /*
  * We could simply use migrate_disable()/enable() but as long as it's a
  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
@@ -635,16 +651,35 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
 /*
+ * If network-based swap is enabled, slub must keep track of whether memory
+ * were allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(SL_pfmemalloc, &slab->flags);
+}
+
+/*
  * Per slab locking using the pagelock
  */
 static __always_inline void slab_lock(struct slab *slab)
 {
-	bit_spin_lock(PG_locked, &slab->__page_flags);
+	bit_spin_lock(SL_locked, &slab->flags);
 }
 
 static __always_inline void slab_unlock(struct slab *slab)
 {
-	bit_spin_unlock(PG_locked, &slab->__page_flags);
+	bit_spin_unlock(SL_locked, &slab->flags);
 }
 
 static inline bool
@@ -1010,7 +1045,7 @@ static void print_slab_info(const struct slab *slab)
 {
 	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
 	       slab, slab->objects, slab->inuse, slab->freelist,
-	       &slab->__page_flags);
+	       &slab->flags);
 }
 
 void skip_orig_size_check(struct kmem_cache *s, const void *object)
@@ -2717,23 +2752,19 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
 	free_slab(s, slab);
 }
 
-/*
- * SLUB reuses PG_workingset bit to keep track of whether it's on
- * the per-node partial list.
- */
 static inline bool slab_test_node_partial(const struct slab *slab)
 {
-	return folio_test_workingset(slab_folio(slab));
+	return test_bit(SL_partial, &slab->flags);
 }
 
 static inline void slab_set_node_partial(struct slab *slab)
 {
-	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	set_bit(SL_partial, &slab->flags);
 }
 
 static inline void slab_clear_node_partial(struct slab *slab)
 {
-	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	clear_bit(SL_partial, &slab->flags);
 }
 
 /*
@@ -4269,7 +4300,12 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 		flags = kmalloc_fix_flags(flags);
 
 	flags |= __GFP_COMP;
-	folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
+
+	if (node == NUMA_NO_NODE)
+		folio = (struct folio *)alloc_frozen_pages_noprof(flags, order);
+	else
+		folio = (struct folio *)__alloc_frozen_pages_noprof(flags, order, node, NULL);
+
 	if (folio) {
 		ptr = folio_address(folio);
 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
@@ -4765,7 +4801,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
 	__folio_clear_large_kmalloc(folio);
-	folio_put(folio);
+	free_frozen_pages(&folio->page, order);
 }
 
 /*
@@ -4930,12 +4966,12 @@ alloc_new:
 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
 * size of an allocation (but not the exact size it was allocated with) and
 * hence implements the following semantics for shrinking and growing buffers
- * with __GFP_ZERO.
+ * with __GFP_ZERO::
 *
- *         new             bucket
- * 0       size            size
- * |--------|----------------|
- * |  keep  |      zero      |
+ *           new             bucket
+ *   0       size            size
+ *   |--------|----------------|
+ *   |  keep  |      zero      |
 *
 * Otherwise, the original allocation size 'orig_size' could be used to
 * precisely clear the requested size, and the new size will also be stored
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 424412680cfc..72d28aa3315b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5428,7 +5428,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
 static int lru_gen_seq_show(struct seq_file *m, void *v)
 {
 	unsigned long seq;
-	bool full = !debugfs_real_fops(m->file)->write;
+	bool full = debugfs_get_aux_num(m->file);
 	struct lruvec *lruvec = v;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int nid = lruvec_pgdat(lruvec)->node_id;
@@ -5764,8 +5764,10 @@ static int __init init_lru_gen(void)
 	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
 		pr_err("lru_gen: failed to create sysfs group\n");
 
-	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
-	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+	debugfs_create_file_aux_num("lru_gen", 0644, NULL, NULL, 1,
+				    &lru_gen_rw_fops);
+	debugfs_create_file_aux_num("lru_gen_full", 0444, NULL, NULL, 0,
+				    &lru_gen_ro_fops);
 
 	return 0;
 };
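Editor's note on the slab flag helpers: the mm/slub.c hunks above stop routing through folio_test_active()/folio_test_workingset() and instead run the generic bitops directly on the slab's own flags word, with the named bits collected in enum slab_flags. The following is a rough, self-contained userspace sketch of that "flags word plus named bits" pattern only; the demo_* names and bit numbers are invented for illustration and are not the kernel code.

/* Userspace sketch of a flags word with named bits, mirroring the shape of
 * struct slab's flags handling above.  All identifiers are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

enum demo_slab_flags {
	DEMO_SL_LOCKED = 0,
	DEMO_SL_PARTIAL = 1,
	DEMO_SL_PFMEMALLOC = 2,
};

struct demo_slab {
	unsigned long flags;
};

static bool demo_test_bit(int bit, const unsigned long *word)
{
	return (*word >> bit) & 1UL;
}

static void demo_set_bit(int bit, unsigned long *word)
{
	*word |= 1UL << bit;
}

static void demo_clear_bit(int bit, unsigned long *word)
{
	*word &= ~(1UL << bit);
}

int main(void)
{
	struct demo_slab slab = { .flags = 0 };

	/* Mark the slab as coming from emergency reserves, then clear it. */
	demo_set_bit(DEMO_SL_PFMEMALLOC, &slab.flags);
	printf("pfmemalloc: %d\n", demo_test_bit(DEMO_SL_PFMEMALLOC, &slab.flags));
	demo_clear_bit(DEMO_SL_PFMEMALLOC, &slab.flags);
	printf("pfmemalloc: %d\n", demo_test_bit(DEMO_SL_PFMEMALLOC, &slab.flags));
	return 0;
}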
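Editor's note on the mm/vmscan.c hunks: they replace the old trick of inferring "full" output from whether the file's file_operations had a write method with a small integer attached to each debugfs file at creation time. Below is a hedged sketch of how that pairing is typically wired up; the "demo" file names, fops and show routine are hypothetical, and only the debugfs_create_file_aux_num()/debugfs_get_aux_num() pairing mirrors the change above.

/* Hypothetical module creating two debugfs files that share one show
 * routine and differ only in the aux number stored at creation time. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* Aux number stored at creation time: 1 for the rw file, 0 for ro. */
	bool full = debugfs_get_aux_num(m->file);

	seq_printf(m, "full=%d\n", full);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static int __init demo_init(void)
{
	debugfs_create_file_aux_num("demo", 0644, NULL, NULL, 1, &demo_fops);
	debugfs_create_file_aux_num("demo_full", 0444, NULL, NULL, 0, &demo_fops);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");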
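Editor's note on the mm/memory.c hunk: it follows the common pattern of keeping a sysctl next to the code it controls, i.e. a const ctl_table with no sentinel entry registered from an initcall through register_sysctl_init(). A minimal hedged sketch of the same shape for a hypothetical knob; demo_knob and the function names are invented, and only the table layout and registration call follow the diff.

#include <linux/init.h>
#include <linux/sysctl.h>

/* Hypothetical tunable exposed as /proc/sys/kernel/demo_knob. */
static int demo_knob;

static const struct ctl_table demo_sysctl_table[] = {
	{
		.procname	= "demo_knob",
		.data		= &demo_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static int __init demo_sysctl_init(void)
{
	/* register_sysctl_init() is meant for boot-time tables that are
	 * never unregistered, so no cleanup path is needed here. */
	register_sysctl_init("kernel", demo_sysctl_table);
	return 0;
}
subsys_initcall(demo_sysctl_init);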