summaryrefslogtreecommitdiff
path: root/mm/mlock.c
diff options
context:
space:
mode:
author	Hugh Dickins <hughd@google.com>	2022-02-14 18:29:54 -0800
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-02-17 11:56:57 -0500
commit	07ca760673088f262da57ff42c15558688565aa2 (patch)
tree	765db0e0ab2245a8e0cecc79ee97c47c294dac24 /mm/mlock.c
parent	b109b87050df5438ee745b2bddfa3587970025bb (diff)
mm/munlock: maintain page->mlock_count while unevictable
Previous patches have been preparatory: now implement page->mlock_count.

The ordering of the "Unevictable LRU" is of no significance, and there is no point holding unevictable pages on a list: place page->mlock_count to overlay page->lru.prev (since page->lru.next is overlaid by compound_head, which needs to be even so as not to satisfy PageTail - though 2 could be added instead of 1 for each mlock, if that's ever an improvement).

But it's only safe to rely on or modify page->mlock_count while lruvec lock is held and page is on unevictable "LRU" - we can save lots of edits by continuing to pretend that there's an imaginary LRU here (there is an unevictable count which still needs to be maintained, but not a list).

The mlock_count technique suffers from an unreliability much like with page_mlock(): while someone else has the page off LRU, not much can be done. As before, err on the safe side (behave as if mlock_count 0), and let try_to_unlock_one() move the page to unevictable if reclaim finds out later on - a few misplaced pages don't matter, what we want to avoid is imbalancing reclaim by flooding evictable lists with unevictable pages.

I am not a fan of "if (!isolate_lru_page(page)) putback_lru_page(page);": if we have taken lruvec lock to get the page off its present list, then we save everyone trouble (and however many extra atomic ops) by putting it on its destination list immediately.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	68
1 file changed, 54 insertions, 14 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 3c26473050a3..f8a3a54687dd 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -54,16 +54,35 @@ EXPORT_SYMBOL(can_do_mlock);
*/
void mlock_page(struct page *page)
{
+ struct lruvec *lruvec;
+ int nr_pages = thp_nr_pages(page);
+
VM_BUG_ON_PAGE(PageTail(page), page);
if (!TestSetPageMlocked(page)) {
- int nr_pages = thp_nr_pages(page);
-
mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
- count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
- if (!isolate_lru_page(page))
- putback_lru_page(page);
+ __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
+ }
+
+ /* There is nothing more we can do while it's off LRU */
+ if (!TestClearPageLRU(page))
+ return;
+
+ lruvec = folio_lruvec_lock_irq(page_folio(page));
+ if (PageUnevictable(page)) {
+ page->mlock_count++;
+ goto out;
}
+
+ del_page_from_lru_list(page, lruvec);
+ ClearPageActive(page);
+ SetPageUnevictable(page);
+ page->mlock_count = 1;
+ add_page_to_lru_list(page, lruvec);
+ __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+out:
+ SetPageLRU(page);
+ unlock_page_lruvec_irq(lruvec);
}
/**
@@ -72,19 +91,40 @@ void mlock_page(struct page *page)
*/
void munlock_page(struct page *page)
{
+ struct lruvec *lruvec;
+ int nr_pages = thp_nr_pages(page);
+
VM_BUG_ON_PAGE(PageTail(page), page);
+ lock_page_memcg(page);
+ lruvec = folio_lruvec_lock_irq(page_folio(page));
+ if (PageLRU(page) && PageUnevictable(page)) {
+ /* Then mlock_count is maintained, but might undercount */
+ if (page->mlock_count)
+ page->mlock_count--;
+ if (page->mlock_count)
+ goto out;
+ }
+ /* else assume that was the last mlock: reclaim will fix it if not */
+
if (TestClearPageMlocked(page)) {
- int nr_pages = thp_nr_pages(page);
-
- mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
- if (!isolate_lru_page(page)) {
- putback_lru_page(page);
- count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
- } else if (PageUnevictable(page)) {
- count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
- }
+ __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+ if (PageLRU(page) || !PageUnevictable(page))
+ __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
+ else
+ __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
+ }
+
+ /* page_evictable() has to be checked *after* clearing Mlocked */
+ if (PageLRU(page) && PageUnevictable(page) && page_evictable(page)) {
+ del_page_from_lru_list(page, lruvec);
+ ClearPageUnevictable(page);
+ add_page_to_lru_list(page, lruvec);
+ __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
}
+out:
+ unlock_page_lruvec_irq(lruvec);
+ unlock_page_memcg(page);
}
/*