author		Linus Torvalds <torvalds@linux-foundation.org>	2023-09-23 11:51:16 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-09-23 11:51:16 -0700
commit		85eba5f1759f9eb89273225027254ced57bd18a2 (patch)
tree		8d797d02144d6a24599dfafb36f193358fb4a49c /mm
parent		8565bdf8cd30cf5bf98efeb9fb53c39bf387e30c (diff)
parent		fe4419801617514765974f3e796269bc512ad146 (diff)
Merge tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "13 hotfixes, 10 of which pertain to post-6.5 issues. The other three
  are cc:stable"

* tag 'mm-hotfixes-stable-2023-09-23-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  proc: nommu: fix empty /proc/<pid>/maps
  filemap: add filemap_map_order0_folio() to handle order0 folio
  proc: nommu: /proc/<pid>/maps: release mmap read lock
  mm: memcontrol: fix GFP_NOFS recursion in memory.high enforcement
  pidfd: prevent a kernel-doc warning
  argv_split: fix kernel-doc warnings
  scatterlist: add missing function params to kernel-doc
  selftests/proc: fixup proc-empty-vm test after KSM changes
  revert "scripts/gdb/symbols: add specific ko module load command"
  selftests: link libasan statically for tests with -fsanitize=address
  task_work: add kerneldoc annotation for 'data' argument
  mm: page_alloc: fix CMA and HIGHATOMIC landing on the wrong buddy list
  sh: mm: re-add lost __ref to ioremap_prot() to fix modpost warning
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	| 69
-rw-r--r--	mm/memcontrol.c	|  6
-rw-r--r--	mm/page_alloc.c	| 12
3 files changed, 57 insertions(+), 30 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 582f5317ff71..4ea4387053e8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3475,13 +3475,11 @@ skip:
*/
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
- unsigned long addr, unsigned int nr_pages)
+ unsigned long addr, unsigned int nr_pages,
+ unsigned int *mmap_miss)
{
vm_fault_t ret = 0;
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
struct page *page = folio_page(folio, start);
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
unsigned int count = 0;
pte_t *old_ptep = vmf->pte;
@@ -3489,8 +3487,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
if (PageHWPoison(page + count))
goto skip;
- if (mmap_miss > 0)
- mmap_miss--;
+ (*mmap_miss)++;
/*
* NOTE: If there're PTE markers, we'll leave them to be
@@ -3525,7 +3522,35 @@ skip:
}
vmf->pte = old_ptep;
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+ return ret;
+}
+
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+ struct folio *folio, unsigned long addr,
+ unsigned int *mmap_miss)
+{
+ vm_fault_t ret = 0;
+ struct page *page = &folio->page;
+
+ if (PageHWPoison(page))
+ return ret;
+
+ (*mmap_miss)++;
+
+ /*
+ * NOTE: If there're PTE markers, we'll leave them to be
+ * handled in the specific fault path, and it'll prohibit
+ * the fault-around logic.
+ */
+ if (!pte_none(ptep_get(vmf->pte)))
+ return ret;
+
+ if (vmf->address == addr)
+ ret = VM_FAULT_NOPAGE;
+
+ set_pte_range(vmf, folio, page, 1, addr);
+ folio_ref_inc(folio);
return ret;
}
@@ -3541,7 +3566,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
vm_fault_t ret = 0;
- int nr_pages = 0;
+ unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3569,25 +3594,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end = folio->index + folio_nr_pages(folio) - 1;
nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
- /*
- * NOTE: If there're PTE markers, we'll leave them to be
- * handled in the specific fault path, and it'll prohibit the
- * fault-around logic.
- */
- if (!pte_none(ptep_get(vmf->pte)))
- goto unlock;
-
- ret |= filemap_map_folio_range(vmf, folio,
- xas.xa_index - folio->index, addr, nr_pages);
+ if (!folio_test_large(folio))
+ ret |= filemap_map_order0_folio(vmf,
+ folio, addr, &mmap_miss);
+ else
+ ret |= filemap_map_folio_range(vmf, folio,
+ xas.xa_index - folio->index, addr,
+ nr_pages, &mmap_miss);
-unlock:
folio_unlock(folio);
folio_put(folio);
- folio = next_uptodate_folio(&xas, mapping, end_pgoff);
- } while (folio);
+ } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
+
+ mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+ if (mmap_miss >= mmap_miss_saved)
+ WRITE_ONCE(file->f_ra.mmap_miss, 0);
+ else
+ WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
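
The filemap.c change inverts the mmap_miss bookkeeping: the old code read file->f_ra.mmap_miss once, decremented it per page inside the mapping loop, and wrote it back; the new helpers instead count mapped pages into a caller-provided counter, and filemap_map_pages() performs a single clamped subtraction at the end. A minimal standalone sketch of that deferred-accounting pattern (all names and types below are simplified stand-ins for illustration, not the kernel's own):

	#include <stdio.h>

	/* Simplified stand-in for file->f_ra.mmap_miss. */
	static unsigned int ra_mmap_miss = 100;

	/* Each mapped page bumps a local counter instead of touching the
	 * shared one; this mimics (*mmap_miss)++ in the two helpers. */
	static void map_one_page(unsigned int *mmap_miss)
	{
		(*mmap_miss)++;
	}

	/* Deferred update: one clamped subtraction at the end, as in the
	 * tail of the new filemap_map_pages(). */
	static void commit_mmap_miss(unsigned int mmap_miss)
	{
		unsigned int saved = ra_mmap_miss;	/* READ_ONCE() in the kernel */

		if (mmap_miss >= saved)
			ra_mmap_miss = 0;		/* WRITE_ONCE() in the kernel */
		else
			ra_mmap_miss = saved - mmap_miss;
	}

	int main(void)
	{
		unsigned int mmap_miss = 0;

		for (int i = 0; i < 7; i++)	/* pretend fault-around mapped 7 pages */
			map_one_page(&mmap_miss);
		commit_mmap_miss(mmap_miss);
		printf("mmap_miss after fault-around: %u\n", ra_mmap_miss);	/* prints 93 */
		return 0;
	}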
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a4d3282493b6..d13dde2f8b56 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2555,7 +2555,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
*/
-void mem_cgroup_handle_over_high(void)
+void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
unsigned long penalty_jiffies;
unsigned long pflags;
@@ -2583,7 +2583,7 @@ retry_reclaim:
*/
nr_reclaimed = reclaim_high(memcg,
in_retry ? SWAP_CLUSTER_MAX : nr_pages,
- GFP_KERNEL);
+ gfp_mask);
/*
* memory.high is breached and reclaim is unable to keep up. Throttle
@@ -2819,7 +2819,7 @@ done_restock:
if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
!(current->flags & PF_MEMALLOC) &&
gfpflags_allow_blocking(gfp_mask)) {
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(gfp_mask);
}
return 0;
}
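
The memcontrol.c fix passes the faulting allocation's gfp_mask through mem_cgroup_handle_over_high() into reclaim_high(), so memory.high reclaim entered from a GFP_NOFS charge can no longer recurse into filesystem code via hardcoded GFP_KERNEL reclaim. A toy illustration of why the mask must be threaded through (the flag values and helper bodies here are simplified stand-ins, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned int gfp_t;

	#define __GFP_FS	(1u << 0)	/* simplified bit, not the kernel's value */
	#define GFP_KERNEL	(__GFP_FS)
	#define GFP_NOFS	(0u)

	/* Toy reclaim: with __GFP_FS clear it must avoid any path that
	 * could call back into the filesystem, so it reclaims less. */
	static unsigned long reclaim_high(unsigned long nr_pages, gfp_t gfp_mask)
	{
		bool fs_allowed = gfp_mask & __GFP_FS;

		printf("reclaiming %lu pages, fs %s\n", nr_pages,
		       fs_allowed ? "allowed" : "forbidden");
		return fs_allowed ? nr_pages : nr_pages / 2;
	}

	/* Before the fix this effectively hardcoded GFP_KERNEL, so a
	 * GFP_NOFS charge could recurse into the filesystem via reclaim. */
	static void handle_over_high(gfp_t gfp_mask)
	{
		reclaim_high(32, gfp_mask);
	}

	int main(void)
	{
		handle_over_high(GFP_NOFS);	/* the caller's context is now honoured */
		return 0;
	}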
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c5be12f9336..95546f376302 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2400,7 +2400,7 @@ void free_unref_page(struct page *page, unsigned int order)
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
- int migratetype;
+ int migratetype, pcpmigratetype;
if (!free_unref_page_prepare(page, pfn, order))
return;
@@ -2408,24 +2408,24 @@ void free_unref_page(struct page *page, unsigned int order)
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
- * offlined but treat HIGHATOMIC as movable pages so we can get those
- * areas back if necessary. Otherwise, we may have to free
+ * offlined but treat HIGHATOMIC and CMA as movable pages so we can
+ * get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
- migratetype = get_pcppage_migratetype(page);
+ migratetype = pcpmigratetype = get_pcppage_migratetype(page);
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
return;
}
- migratetype = MIGRATE_MOVABLE;
+ pcpmigratetype = MIGRATE_MOVABLE;
}
zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
- free_unref_page_commit(zone, pcp, page, migratetype, order);
+ free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
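
The page_alloc.c fix splits one variable into two: pcpmigratetype, clamped to MIGRATE_MOVABLE, picks the pcp list, while the original migratetype survives so that the fallback path (pcp trylock failure) frees CMA and HIGHATOMIC pages onto their own buddy freelists rather than the movable one. A compressed sketch of the branch structure (the enum layout and function shape are illustrative stand-ins):

	#include <stdio.h>

	enum migratetype {
		MIGRATE_UNMOVABLE,
		MIGRATE_MOVABLE,
		MIGRATE_RECLAIMABLE,
		MIGRATE_PCPTYPES,	/* first type not tracked on pcp lists */
		MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
		MIGRATE_CMA,
		MIGRATE_ISOLATE,
	};

	static void free_unref_page(int migratetype, int pcp_lock_held)
	{
		int pcpmigratetype = migratetype;

		if (migratetype >= MIGRATE_PCPTYPES) {
			if (migratetype == MIGRATE_ISOLATE) {
				printf("buddy list %d (isolated)\n", migratetype);
				return;
			}
			/* Only the pcp placement is clamped; 'migratetype'
			 * keeps the real type for the buddy fallback. */
			pcpmigratetype = MIGRATE_MOVABLE;
		}

		if (pcp_lock_held)
			printf("pcp list %d\n", pcpmigratetype);
		else
			printf("buddy list %d\n", migratetype);	/* CMA/HIGHATOMIC preserved */
	}

	int main(void)
	{
		free_unref_page(MIGRATE_CMA, 1);	/* pcp: treated as movable */
		free_unref_page(MIGRATE_CMA, 0);	/* buddy: back on the CMA list */
		return 0;
	}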