Diffstat (limited to 'mm')
-rw-r--r--  mm/damon/vaddr.c     8
-rw-r--r--  mm/huge_memory.c    15
-rw-r--r--  mm/hugetlb.c         2
-rw-r--r--  mm/memblock.c        4
-rw-r--r--  mm/memcontrol.c      7
-rw-r--r--  mm/migrate.c        23
-rw-r--r--  mm/slub.c           65
-rw-r--r--  mm/util.c            3
8 files changed, 78 insertions, 49 deletions
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 8c048f9b129e..7e834467b2d8 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -328,10 +328,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
}
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!pte) {
- walk->action = ACTION_AGAIN;
+ if (!pte)
return 0;
- }
if (!pte_present(ptep_get(pte)))
goto out;
damon_ptep_mkold(pte, walk->vma, addr);
@@ -481,10 +479,8 @@ regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!pte) {
- walk->action = ACTION_AGAIN;
+ if (!pte)
return 0;
- }
ptent = ptep_get(pte);
if (!pte_present(ptent))
goto out;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5acca24bbabb..1b81680b4225 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4104,32 +4104,23 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
static bool thp_underused(struct folio *folio)
{
int num_zero_pages = 0, num_filled_pages = 0;
- void *kaddr;
int i;
if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
return false;
for (i = 0; i < folio_nr_pages(folio); i++) {
- kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
- if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
- num_zero_pages++;
- if (num_zero_pages > khugepaged_max_ptes_none) {
- kunmap_local(kaddr);
+ if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
+ if (++num_zero_pages > khugepaged_max_ptes_none)
return true;
- }
} else {
/*
* Another path for early exit once the number
* of non-zero filled pages exceeds threshold.
*/
- num_filled_pages++;
- if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
- kunmap_local(kaddr);
+ if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
return false;
- }
}
- kunmap_local(kaddr);
}
return false;
}
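
Note: the thp_underused() hunk replaces the open-coded kmap_local_folio()/memchr_inv() zero scan with pages_identical() against the shared zero page, which also removes the per-iteration kunmap bookkeeping. A minimal userspace sketch of the same counting logic follows; BLOCK_SIZE, NR_BLOCKS and MAX_ZERO are made-up stand-ins for PAGE_SIZE, HPAGE_PMD_NR and khugepaged_max_ptes_none, and memcmp() against a zero block stands in for pages_identical(page, ZERO_PAGE(0)).

/* Userspace sketch only, not kernel code; names and thresholds are stand-ins. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 4096
#define NR_BLOCKS  512
#define MAX_ZERO   256

static bool block_is_zero(const unsigned char *p)
{
	static const unsigned char zero[BLOCK_SIZE];	/* canonical all-zero block */

	return memcmp(p, zero, BLOCK_SIZE) == 0;
}

static bool region_underused(const unsigned char *buf)
{
	int num_zero = 0, num_filled = 0;

	for (int i = 0; i < NR_BLOCKS; i++) {
		if (block_is_zero(buf + (size_t)i * BLOCK_SIZE)) {
			if (++num_zero > MAX_ZERO)
				return true;		/* mostly zero: stop early */
		} else if (++num_filled >= NR_BLOCKS - MAX_ZERO) {
			return false;			/* enough real data: stop early */
		}
	}
	return false;
}

int main(void)
{
	unsigned char *buf = calloc(NR_BLOCKS, BLOCK_SIZE);

	if (!buf)
		return 1;
	printf("underused: %d\n", region_underused(buf));	/* all zero -> 1 */
	free(buf);
	return 0;
}
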
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6cac826cb61f..795ee393eac0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7222,6 +7222,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
psize);
}
spin_unlock(ptl);
+
+ cond_resched();
}
/*
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
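
Note: the hugetlb_change_protection() hunk inserts cond_resched() after the page-table lock is dropped, so protection changes over very large mappings reach a scheduling point on every iteration. A rough userspace analogue of the pattern follows; pthread_mutex_lock()/sched_yield() are stand-ins for the page-table spinlock and cond_resched(), and the yield happens strictly after the lock is released, as in the hunk above.

/* Userspace analogue only: yield between chunks, never while holding the lock. */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_range(long *items, long n, long chunk)
{
	for (long i = 0; i < n; i += chunk) {
		pthread_mutex_lock(&lock);
		for (long j = i; j < i + chunk && j < n; j++)
			items[j] = 0;			/* the per-chunk work */
		pthread_mutex_unlock(&lock);
		sched_yield();				/* give others a chance between chunks */
	}
}

int main(void)
{
	long items[1024] = { 0 };

	process_range(items, 1024, 64);
	return 0;
}
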
diff --git a/mm/memblock.c b/mm/memblock.c
index 120a501a887a..e23e16618e9b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2452,8 +2452,10 @@ static int reserve_mem_kho_finalize(struct kho_serialization *ser)
for (i = 0; i < reserved_mem_count; i++) {
struct reserve_mem_table *map = &reserved_mem_table[i];
+ struct page *page = phys_to_page(map->start);
+ unsigned int nr_pages = map->size >> PAGE_SHIFT;
- err |= kho_preserve_phys(map->start, map->size);
+ err |= kho_preserve_pages(page, nr_pages);
}
err |= kho_preserve_folio(page_folio(kho_fdt));
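
Note: the memblock hunk converts each preserved range from a (phys, size) pair into the (struct page *, nr_pages) form that kho_preserve_pages() takes; the page count is simply the byte size shifted down by PAGE_SHIFT. A minimal arithmetic check, assuming 4 KiB pages:

/* Size-to-page-count conversion used above, assuming PAGE_SHIFT == 12 (4 KiB pages). */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long size = 16ULL << 20;			/* 16 MiB reserved region */
	unsigned int nr_pages = (unsigned int)(size >> PAGE_SHIFT);

	printf("%llu bytes -> %u pages\n", size, nr_pages);	/* 16777216 bytes -> 4096 pages */
	return 0;
}
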
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e090f29eb03b..4deda33625f4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2307,12 +2307,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
bool drained = false;
bool raised_max_event = false;
unsigned long pflags;
+ bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
retry:
if (consume_stock(memcg, nr_pages))
return 0;
- if (!gfpflags_allow_spinning(gfp_mask))
+ if (!allow_spinning)
/* Avoid the refill and flush of the older stock */
batch = nr_pages;
@@ -2348,7 +2349,7 @@ retry:
if (!gfpflags_allow_blocking(gfp_mask))
goto nomem;
- memcg_memory_event(mem_over_limit, MEMCG_MAX);
+ __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
raised_max_event = true;
psi_memstall_enter(&pflags);
@@ -2415,7 +2416,7 @@ force:
* a MEMCG_MAX event.
*/
if (!raised_max_event)
- memcg_memory_event(mem_over_limit, MEMCG_MAX);
+ __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
/*
* The allocation either can't fail or will lead to more memory
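
Note: try_charge_memcg() now evaluates gfpflags_allow_spinning() once into a local allow_spinning and passes it down to __memcg_memory_event(), so the event accounting can avoid the heavier path when the caller's gfp mask forbids spinning. A generic sketch of that "compute the predicate once, hand it to the helpers" shape follows; charge(), log_event() and CAN_SPIN are made-up stand-ins, not the memcg API.

/* Illustrative shape only; names are stand-ins, not kernel interfaces. */
#include <stdbool.h>
#include <stdio.h>

#define CAN_SPIN 0x1u

static void log_event(const char *what, bool allow_spinning)
{
	if (!allow_spinning)
		return;				/* skip the lock-taking bookkeeping */
	printf("event: %s\n", what);
}

static int charge(unsigned int flags)
{
	bool allow_spinning = flags & CAN_SPIN;	/* computed once, like gfpflags_allow_spinning() */

	log_event("limit hit", allow_spinning);	/* both call sites reuse the cached answer */
	log_event("max event", allow_spinning);
	return 0;
}

int main(void)
{
	charge(0);
	charge(CAN_SPIN);
	return 0;
}
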
diff --git a/mm/migrate.c b/mm/migrate.c
index aee61a980374..e3065c9edb55 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -296,19 +296,16 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
}
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
- struct folio *folio,
- unsigned long idx)
+ struct folio *folio, pte_t old_pte, unsigned long idx)
{
struct page *page = folio_page(folio, idx);
- bool contains_data;
pte_t newpte;
- void *addr;
if (PageCompound(page))
return false;
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
+ VM_BUG_ON_PAGE(pte_present(old_pte), page);
if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -319,15 +316,17 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
* this subpage has been non present. If the subpage is only zero-filled
* then map it to the shared zeropage.
*/
- addr = kmap_local_page(page);
- contains_data = memchr_inv(addr, 0, PAGE_SIZE);
- kunmap_local(addr);
-
- if (contains_data)
+ if (!pages_identical(page, ZERO_PAGE(0)))
return false;
newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
pvmw->vma->vm_page_prot));
+
+ if (pte_swp_soft_dirty(old_pte))
+ newpte = pte_mksoft_dirty(newpte);
+ if (pte_swp_uffd_wp(old_pte))
+ newpte = pte_mkuffd_wp(newpte);
+
set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
@@ -370,13 +369,13 @@ static bool remove_migration_pte(struct folio *folio,
continue;
}
#endif
+ old_pte = ptep_get(pvmw.pte);
if (rmap_walk_arg->map_unused_to_zeropage &&
- try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+ try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
continue;
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
- old_pte = ptep_get(pvmw.pte);
entry = pte_to_swp_entry(old_pte);
if (!is_migration_entry_young(entry))
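
Note: the migrate.c change reads the old swap PTE once, passes it into try_to_map_unused_to_zeropage(), and carries the soft-dirty and userfaultfd write-protect bits over to the new zero-page PTE instead of dropping them. A small sketch of that flag-preservation pattern follows; SOFT_DIRTY, UFFD_WP and ZERO_MAP are illustrative bits, not the real PTE encoding.

/* Flag-carrying sketch only; the bit layout is made up. */
#include <stdio.h>

#define SOFT_DIRTY (1u << 0)
#define UFFD_WP    (1u << 1)
#define ZERO_MAP   (1u << 7)

static unsigned int make_zero_entry(unsigned int old_entry)
{
	unsigned int new_entry = ZERO_MAP;	/* base: map the shared zero page */

	if (old_entry & SOFT_DIRTY)
		new_entry |= SOFT_DIRTY;	/* like pte_mksoft_dirty(newpte) */
	if (old_entry & UFFD_WP)
		new_entry |= UFFD_WP;		/* like pte_mkuffd_wp(newpte) */

	return new_entry;
}

int main(void)
{
	printf("0x%x\n", make_zero_entry(SOFT_DIRTY | UFFD_WP));	/* 0x83: both bits kept */
	return 0;
}
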
diff --git a/mm/slub.c b/mm/slub.c
index 135c408e0515..b1f15598fbfd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -504,10 +504,18 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
return s->node[node];
}
-/* Get the barn of the current cpu's memory node */
+/*
+ * Get the barn of the current cpu's closest memory node. It may not exist on
+ * systems with memoryless nodes but without CONFIG_HAVE_MEMORYLESS_NODES
+ */
static inline struct node_barn *get_barn(struct kmem_cache *s)
{
- return get_node(s, numa_mem_id())->barn;
+ struct kmem_cache_node *n = get_node(s, numa_mem_id());
+
+ if (!n)
+ return NULL;
+
+ return n->barn;
}
/*
@@ -4982,6 +4990,10 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
}
barn = get_barn(s);
+ if (!barn) {
+ local_unlock(&s->cpu_sheaves->lock);
+ return NULL;
+ }
full = barn_replace_empty_sheaf(barn, pcs->main);
@@ -5153,13 +5165,20 @@ next_batch:
if (unlikely(pcs->main->size == 0)) {
struct slab_sheaf *full;
+ struct node_barn *barn;
if (pcs->spare && pcs->spare->size > 0) {
swap(pcs->main, pcs->spare);
goto do_alloc;
}
- full = barn_replace_empty_sheaf(get_barn(s), pcs->main);
+ barn = get_barn(s);
+ if (!barn) {
+ local_unlock(&s->cpu_sheaves->lock);
+ return allocated;
+ }
+
+ full = barn_replace_empty_sheaf(barn, pcs->main);
if (full) {
stat(s, BARN_GET);
@@ -5314,6 +5333,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
{
struct slub_percpu_sheaves *pcs;
struct slab_sheaf *sheaf = NULL;
+ struct node_barn *barn;
if (unlikely(size > s->sheaf_capacity)) {
@@ -5355,8 +5375,11 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
pcs->spare = NULL;
stat(s, SHEAF_PREFILL_FAST);
} else {
+ barn = get_barn(s);
+
stat(s, SHEAF_PREFILL_SLOW);
- sheaf = barn_get_full_or_empty_sheaf(get_barn(s));
+ if (barn)
+ sheaf = barn_get_full_or_empty_sheaf(barn);
if (sheaf && sheaf->size)
stat(s, BARN_GET);
else
@@ -5426,7 +5449,7 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
* If the barn has too many full sheaves or we fail to refill the sheaf,
* simply flush and free it.
*/
- if (data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
+ if (!barn || data_race(barn->nr_full) >= MAX_FULL_SHEAVES ||
refill_sheaf(s, sheaf, gfp)) {
sheaf_flush_unused(s, sheaf);
free_empty_sheaf(s, sheaf);
@@ -5943,10 +5966,9 @@ slab_empty:
* put the full sheaf there.
*/
static void __pcs_install_empty_sheaf(struct kmem_cache *s,
- struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty)
+ struct slub_percpu_sheaves *pcs, struct slab_sheaf *empty,
+ struct node_barn *barn)
{
- struct node_barn *barn;
-
lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
/* This is what we expect to find if nobody interrupted us. */
@@ -5956,8 +5978,6 @@ static void __pcs_install_empty_sheaf(struct kmem_cache *s,
return;
}
- barn = get_barn(s);
-
/*
* Unlikely because if the main sheaf had space, we would have just
* freed to it. Get rid of our empty sheaf.
@@ -6002,6 +6022,11 @@ restart:
lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
barn = get_barn(s);
+ if (!barn) {
+ local_unlock(&s->cpu_sheaves->lock);
+ return NULL;
+ }
+
put_fail = false;
if (!pcs->spare) {
@@ -6084,7 +6109,7 @@ got_empty:
}
pcs = this_cpu_ptr(s->cpu_sheaves);
- __pcs_install_empty_sheaf(s, pcs, empty);
+ __pcs_install_empty_sheaf(s, pcs, empty, barn);
return pcs;
}
@@ -6121,8 +6146,9 @@ bool free_to_pcs(struct kmem_cache *s, void *object)
static void rcu_free_sheaf(struct rcu_head *head)
{
+ struct kmem_cache_node *n;
struct slab_sheaf *sheaf;
- struct node_barn *barn;
+ struct node_barn *barn = NULL;
struct kmem_cache *s;
sheaf = container_of(head, struct slab_sheaf, rcu_head);
@@ -6139,7 +6165,11 @@ static void rcu_free_sheaf(struct rcu_head *head)
*/
__rcu_free_sheaf_prepare(s, sheaf);
- barn = get_node(s, sheaf->node)->barn;
+ n = get_node(s, sheaf->node);
+ if (!n)
+ goto flush;
+
+ barn = n->barn;
/* due to slab_free_hook() */
if (unlikely(sheaf->size == 0))
@@ -6157,11 +6187,12 @@ static void rcu_free_sheaf(struct rcu_head *head)
return;
}
+flush:
stat(s, BARN_PUT_FAIL);
sheaf_flush_unused(s, sheaf);
empty:
- if (data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
+ if (barn && data_race(barn->nr_empty) < MAX_EMPTY_SHEAVES) {
barn_put_empty_sheaf(barn, sheaf);
return;
}
@@ -6191,6 +6222,10 @@ bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
}
barn = get_barn(s);
+ if (!barn) {
+ local_unlock(&s->cpu_sheaves->lock);
+ goto fail;
+ }
empty = barn_get_empty_sheaf(barn);
@@ -6304,6 +6339,8 @@ next_batch:
goto do_free;
barn = get_barn(s);
+ if (!barn)
+ goto no_empty;
if (!pcs->spare) {
empty = barn_get_empty_sheaf(barn);
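
Note: the slub.c hunks make get_barn() (and the RCU path's get_node() lookup) tolerate memoryless nodes by returning NULL, and every caller now checks for NULL and backs out: dropping the per-CPU sheaves lock and failing the fast path, or flushing the sheaf directly. A generic userspace sketch of that defensive shape follows; lookup_cache_node() and fast_path_get() are made-up stand-ins for get_barn() and its callers.

/* Illustrative caller pattern only; names are stand-ins for the slub helpers. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct cache_node { int hits; };

static struct cache_node nodes[2];
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;

static struct cache_node *lookup_cache_node(int node)
{
	if (node < 0 || node >= 2)
		return NULL;			/* e.g. a memoryless node: no per-node state */
	return &nodes[node];
}

static int fast_path_get(int node)
{
	struct cache_node *n;

	pthread_mutex_lock(&cpu_lock);
	n = lookup_cache_node(node);
	if (!n) {
		pthread_mutex_unlock(&cpu_lock);	/* back out before any fallback */
		return 0;				/* caller takes the slow path, as the hunks above do */
	}
	n->hits++;					/* the per-node fast-path work */
	pthread_mutex_unlock(&cpu_lock);
	return 1;
}

int main(void)
{
	printf("node 0: %d, node 5: %d\n", fast_path_get(0), fast_path_get(5));
	return 0;
}
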
diff --git a/mm/util.c b/mm/util.c
index 6c1d64ed0221..8989d5767528 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -566,6 +566,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
{
+ loff_t off = (loff_t)pgoff << PAGE_SHIFT;
unsigned long ret;
struct mm_struct *mm = current->mm;
unsigned long populate;
@@ -573,7 +574,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
ret = security_mmap_file(file, prot, flag);
if (!ret)
- ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
+ ret = fsnotify_mmap_perm(file, prot, off, len);
if (!ret) {
if (mmap_write_lock_killable(mm))
return -EINTR;
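
Note: the util.c fix corrects the offset handed to fsnotify_mmap_perm(): pgoff is a page index, so the byte offset is pgoff shifted left by PAGE_SHIFT in a 64-bit (loff_t) type, while the old pgoff >> PAGE_SHIFT shifted the wrong way and collapsed small offsets to zero. A quick check of the arithmetic, assuming 4 KiB pages:

/* Demonstrates the pgoff-to-byte-offset conversion, assuming PAGE_SHIFT == 12. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long pgoff = 0x10;				/* page index 16 */
	int64_t off = (int64_t)pgoff << PAGE_SHIFT;		/* correct: 0x10000 (64 KiB) */
	unsigned long wrong = pgoff >> PAGE_SHIFT;		/* old code's result: 0 */

	printf("off=0x%llx wrong=0x%lx\n", (unsigned long long)off, wrong);
	return 0;
}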