author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 14:55:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 14:55:10 -0800
commit     5b200f578960a9635918a0ed41be3d8dc90186bf (patch)
tree       8190f2278adec6e7dfc3bcdc00abf8e9b467af5d /mm/rmap.c
parent     3db1a3fa98808aa90f95ec3e0fa2fc7abf28f5c9 (diff)
parent     15b447361794271f4d03c04d82276a841fe06328 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "More MM work: a memcg scalability improvement"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/lru: revise the comments of lru_lock
  mm/lru: introduce relock_page_lruvec()
  mm/lru: replace pgdat lru_lock with lruvec lock
  mm/swap.c: serialize memcg changes in pagevec_lru_move_fn
  mm/compaction: do page isolation first in compaction
  mm/lru: introduce TestClearPageLRU()
  mm/mlock: remove __munlock_isolate_lru_page()
  mm/mlock: remove lru_lock on TestClearPageMlocked
  mm/vmscan: remove lruvec reget in move_pages_to_lru
  mm/lru: move lock into lru_note_cost
  mm/swap.c: fold vm event PGROTATED into pagevec_move_tail_fn
  mm/memcg: add debug checking in lock_page_memcg
  mm: page_idle_get_page() does not need lru_lock
  mm/rmap: stop store reordering issue on page->mapping
  mm/vmscan: remove unnecessary lruvec adding
  mm/thp: narrow lru locking
  mm/thp: simplify lru_add_page_tail()
  mm/thp: use head for head page in lru_add_page_tail()
  mm/thp: move lru_add_page_tail() to huge_memory.c
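[Editor's note] The scalability change at the heart of this series replaces the single per-node pgdat->lru_lock with one lock per lruvec (per memcg, per node), so reclaim in different cgroups stops contending on one spinlock; relock_page_lruvec() then lets batch walkers keep a lock across consecutive pages that map to the same lruvec. Below is a minimal userspace sketch of that pattern, with simplified stand-in types; the real lock_page_lruvec_irq() must additionally pin the page's memcg (via RCU and TestClearPageLRU()) so the lruvec cannot change under the lookup.

/*
 * Userspace sketch (not kernel code) of the locking pattern this merge
 * introduces: one lru_lock per lruvec instead of one per node, plus a
 * "relock" helper that keeps the current lock while consecutive pages
 * map to the same lruvec. All types here are simplified stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

struct lruvec {
	pthread_spinlock_t lru_lock;	/* was: one pgdat->lru_lock per node */
};

struct page {
	struct lruvec *lruvec;		/* stand-in for the memcg/node lookup */
};

static struct lruvec *lock_page_lruvec(struct page *page)
{
	struct lruvec *lruvec = page->lruvec;

	pthread_spin_lock(&lruvec->lru_lock);
	return lruvec;
}

/*
 * Batch walkers call this once per page: if the next page belongs to
 * the lruvec already held, the unlock/lock pair is skipped entirely.
 */
static struct lruvec *relock_page_lruvec(struct page *page,
					 struct lruvec *locked)
{
	if (locked == page->lruvec)
		return locked;
	if (locked)
		pthread_spin_unlock(&locked->lru_lock);
	return lock_page_lruvec(page);
}

int main(void)
{
	struct lruvec a, b;
	struct page pages[3] = { { &a }, { &a }, { &b } };
	struct lruvec *locked = NULL;

	pthread_spin_init(&a.lru_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b.lru_lock, PTHREAD_PROCESS_PRIVATE);

	for (int i = 0; i < 3; i++) {
		locked = relock_page_lruvec(&pages[i], locked);
		printf("page %d under lruvec %p\n", i, (void *)locked);
	}
	if (locked)
		pthread_spin_unlock(&locked->lru_lock);
	return 0;
}

Only the first of the three pages above pays for a lock acquisition on lruvec "a"; the second reuses it, which is where the batched LRU paths win back the cost of the finer-grained locking.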
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 6657000b18d4..08c56aaf72eb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -28,12 +28,12 @@
* hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
- * pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
- * mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
+ * lock_page_memcg move_lock (in __set_page_dirty_buffers)
* i_pages lock (widely used)
+ * lruvec->lru_lock (in lock_page_lruvec_irq)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)
* bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
@@ -1054,8 +1054,14 @@ static void __page_set_anon_rmap(struct page *page,
if (!exclusive)
anon_vma = anon_vma->root;
+ /*
+ * page_idle does a lockless/optimistic rmap scan on page->mapping.
+ * Make sure the compiler doesn't split the stores of anon_vma and
+ * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
+ * could mistake the mapping for a struct address_space and crash.
+ */
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
+ WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
page->index = linear_page_index(vma, address);
}
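[Editor's note] The second hunk fixes a real ordering hazard: page_idle scans page->mapping locklessly, and with a plain assignment the compiler may legally tear or reorder the store, writing the anon_vma pointer first and the PAGE_MAPPING_ANON tag afterwards, so a racing reader could classify the page as file-backed and dereference the pointer as a struct address_space. Below is a self-contained userspace sketch of the publish/consume pairing, using C11 atomic builtins where the kernel uses WRITE_ONCE()/READ_ONCE(); all types are simplified stand-ins.

/*
 * Userspace sketch (not kernel code) of why the store needs
 * WRITE_ONCE(): the writer publishes a tagged pointer and a lockless
 * reader classifies it by the low bit, so the combined value must be
 * emitted as one store, never "store pointer, then OR in the tag".
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON 0x1UL

struct anon_vma { int dummy; };
struct page { void *mapping; };

/* Writer: mirrors __page_set_anon_rmap(). */
static void set_anon_rmap(struct page *page, struct anon_vma *anon_vma)
{
	void *mapping = (void *)((uintptr_t)anon_vma | PAGE_MAPPING_ANON);

	/*
	 * C11 spelling of the kernel's WRITE_ONCE(): a single relaxed
	 * atomic store the compiler cannot split into pieces.
	 */
	__atomic_store_n(&page->mapping, mapping, __ATOMIC_RELAXED);
}

/* Lockless reader: mirrors page_idle's optimistic rmap scan. */
static struct anon_vma *read_anon_vma(struct page *page)
{
	void *mapping = __atomic_load_n(&page->mapping, __ATOMIC_RELAXED);

	if (((uintptr_t)mapping & PAGE_MAPPING_ANON) == 0)
		return NULL;	/* file page: mapping is an address_space */
	return (struct anon_vma *)((uintptr_t)mapping & ~PAGE_MAPPING_ANON);
}

int main(void)
{
	static struct anon_vma av;
	struct page page = { 0 };

	set_anon_rmap(&page, &av);
	printf("anon_vma published: %p\n", (void *)read_anon_vma(&page));
	return 0;
}

The reader tolerates seeing either the old or the new mapping; the only thing that must never happen is observing half of the tagged pointer, and a single volatile-sized store is exactly the guarantee WRITE_ONCE() provides.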