-rw-r--r--	mm/memory.c	 9
-rw-r--r--	mm/rmap.c	 4
-rw-r--r--	mm/swapfile.c	14
3 files changed, 24 insertions, 3 deletions
diff --git a/mm/memory.c b/mm/memory.c
index a4fc6e632d2c..97ddba866e43 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4341,6 +4341,15 @@ check_folio:
 	if (unlikely(folio != swapcache && swapcache)) {
 		folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, vma);
+	} else if (!folio_test_anon(folio)) {
+		/*
+		 * We currently only expect small !anon folios, which are either
+		 * fully exclusive or fully shared. If we ever get large folios
+		 * here, we have to be careful.
+		 */
+		VM_WARN_ON_ONCE(folio_test_large(folio));
+		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+		folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
 	} else {
 		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
 					rmap_flags);
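
For orientation: the hunk above sits in do_swap_page() (after the check_folio label) and changes how the swapped-in folio is added to the anon rmap. A folio that is not yet anonymous is now attached with folio_add_new_anon_rmap() and the locally computed rmap_flags instead of folio_add_anon_rmap_ptes(). Below is a minimal, standalone C sketch of just that selection; struct folio, enum rmap_call, pick_rmap_call() and the boolean fields are illustrative stand-ins, not kernel APIs and not part of this patch.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct folio: only the flags this branch looks at. */
struct folio {
	bool anon;    /* folio_test_anon()   */
	bool large;   /* folio_test_large()  */
	bool locked;  /* folio_test_locked() */
};

enum rmap_call {
	ADD_NEW_ANON_RMAP,	/* folio_add_new_anon_rmap()  */
	ADD_ANON_RMAP_PTES,	/* folio_add_anon_rmap_ptes() */
};

/*
 * Mirrors the patched selection in do_swap_page(): a KSM copy
 * (folio != swapcache) and a not-yet-anonymous folio both take the
 * "new anon rmap" path; only an already-anonymous folio keeps using
 * folio_add_anon_rmap_ptes().
 */
static enum rmap_call pick_rmap_call(const struct folio *folio, bool ksm_copy)
{
	if (ksm_copy)
		return ADD_NEW_ANON_RMAP;
	if (!folio->anon) {
		/* the patch only expects small, locked !anon folios here */
		if (folio->large || !folio->locked)
			fprintf(stderr, "unexpected !anon folio state\n");
		return ADD_NEW_ANON_RMAP;
	}
	return ADD_ANON_RMAP_PTES;
}

int main(void)
{
	const struct folio from_swapcache = { .anon = false, .large = false, .locked = true };
	const struct folio already_anon   = { .anon = true,  .large = false, .locked = true };

	printf("!anon folio -> %d (new anon rmap)\n", pick_rmap_call(&from_swapcache, false));
	printf("anon folio  -> %d (anon rmap ptes)\n", pick_rmap_call(&already_anon, false));
	return 0;
}
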
diff --git a/mm/rmap.c b/mm/rmap.c
index c0c99f91ade1..41012fe7a05a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1422,7 +1422,9 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
-	__folio_set_swapbacked(folio);
+
+	if (!folio_test_swapbacked(folio))
+		__folio_set_swapbacked(folio);
 	__folio_set_anon(folio, vma, address, exclusive);
 
 	if (likely(!folio_test_large(folio))) {
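
The hunk above makes folio_add_new_anon_rmap() skip the non-atomic __folio_set_swapbacked() when PG_swapbacked is already set, so the helper can now also be handed folios that may already carry the flag, for instance folios coming from the swap cache via the other two hunks. A tiny standalone model of that check-before-set step follows; the flag is reduced to a plain boolean and mark_swapbacked() is an illustrative stand-in, not the kernel API.

#include <stdbool.h>

/* Toy folio: PG_swapbacked modelled as a plain boolean. */
struct folio {
	bool swapbacked;
};

/*
 * Check-before-set, as in the patched folio_add_new_anon_rmap():
 * the set is only issued when the flag is not already present.
 */
static void mark_swapbacked(struct folio *folio)
{
	if (!folio->swapbacked)
		folio->swapbacked = true;
}

int main(void)
{
	struct folio fresh     = { .swapbacked = false };
	struct folio swapcache = { .swapbacked = true };

	mark_swapbacked(&fresh);	/* flag becomes set */
	mark_swapbacked(&swapcache);	/* already set, nothing to do */
	return 0;
}
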
diff --git a/mm/swapfile.c b/mm/swapfile.c
index ae1d2700f6a3..38bdc439651a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1908,8 +1908,18 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 		if (pte_swp_exclusive(old_pte))
 			rmap_flags |= RMAP_EXCLUSIVE;
-
-		folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
+		/*
+		 * We currently only expect small !anon folios, which are either
+		 * fully exclusive or fully shared. If we ever get large folios
+		 * here, we have to be careful.
+		 */
+		if (!folio_test_anon(folio)) {
+			VM_WARN_ON_ONCE(folio_test_large(folio));
+			VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+			folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
+		} else {
+			folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
+		}
 	} else { /* ksm created a completely new copy */
 		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(folio, vma);
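
unuse_pte() gets the same treatment as do_swap_page(): in the branch where the folio is still the swap-cache folio (the else branch handles the KSM copy), exclusivity is taken from the swap pte, and a folio that is not yet anonymous is attached with folio_add_new_anon_rmap() rather than folio_add_anon_rmap_pte(). A standalone C sketch of that flow follows; the types, unuse_pte_rmap(), the flag macros and the printf output are illustrative stand-ins under those assumptions, not the real kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; not the real kernel types or helpers. */
struct folio {
	bool anon;    /* folio_test_anon()   */
	bool large;   /* folio_test_large()  */
	bool locked;  /* folio_test_locked() */
};

#define RMAP_NONE	0
#define RMAP_EXCLUSIVE	1

/*
 * Mirrors the patched branch in unuse_pte(): the exclusive bit is carried
 * over from the swap pte, and a not-yet-anonymous folio goes through the
 * "new anon rmap" path instead of folio_add_anon_rmap_pte().
 */
static void unuse_pte_rmap(const struct folio *folio, bool pte_swp_exclusive)
{
	int rmap_flags = RMAP_NONE;

	if (pte_swp_exclusive)
		rmap_flags |= RMAP_EXCLUSIVE;

	if (!folio->anon) {
		/* only small, locked !anon folios are expected here */
		if (folio->large || !folio->locked)
			fprintf(stderr, "unexpected !anon folio state\n");
		printf("folio_add_new_anon_rmap(flags=%d)\n", rmap_flags);
	} else {
		printf("folio_add_anon_rmap_pte(flags=%d)\n", rmap_flags);
	}
}

int main(void)
{
	const struct folio from_swapcache = { .anon = false, .large = false, .locked = true };
	const struct folio already_anon   = { .anon = true,  .large = false, .locked = true };

	unuse_pte_rmap(&from_swapcache, true);	/* exclusive swap pte, !anon folio */
	unuse_pte_rmap(&already_anon, false);	/* shared swap pte, anon folio */
	return 0;
}
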