author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-23 00:13:14 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-04 10:32:20 -0700
commit	91e79d22be75fec88ae58d274a7c9e49d6215099 (patch)
tree	f151d6c93440cec269c0a91c40f843fb9920b8e5 /mm/memory-failure.c
parent	bc0c3357601e3ff1b006600530079bd246ef0d82 (diff)
mm: convert DAX lock/unlock page to lock/unlock folio
The one caller of DAX lock/unlock page already calls compound_head(), so
use page_folio() instead, then use a folio throughout the DAX code to
remove uses of page->mapping and page->index.

[jane.chu@oracle.com: add comment to mf_generic_kill_procs(), simplify
 mf_generic_kill_procs:folio initialization]
Link: https://lkml.kernel.org/r/20230908222336.186313-1-jane.chu@oracle.com
Link: https://lkml.kernel.org/r/20230822231314.349200-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
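For illustration, a minimal caller-side sketch of the locking pattern this conversion produces. The function name example_lock_dax_pfn is hypothetical and not part of this patch; only pfn_folio(), dax_lock_folio(), dax_unlock_folio() and dax_entry_t are existing interfaces.

#include <linux/dax.h>
#include <linux/mm.h>

/* Sketch only: lock a DAX pfn by folio instead of by (head) page. */
static int example_lock_dax_pfn(unsigned long pfn)
{
	/*
	 * pfn_folio() already returns the head folio, so the old
	 * "page = compound_head(page)" step is no longer needed.
	 */
	struct folio *folio = pfn_folio(pfn);
	dax_entry_t cookie;

	cookie = dax_lock_folio(folio);		/* was dax_lock_page(page) */
	if (!cookie)
		return -EBUSY;

	/* ... consult folio->mapping and folio->index under the DAX lock ... */

	dax_unlock_folio(folio, cookie);	/* was dax_unlock_page(page, cookie) */
	return 0;
}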
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4d6e43c88489..660c21859118 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1713,32 +1713,35 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
 	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
 }
 
+/*
+ * Only dev_pagemap pages get here: fsdax pages when the filesystem
+ * either does not claim or fails to claim a hwpoison event, or devdax
+ * pages.  The fsdax pages are initialized per base page, while devdax
+ * pages may be initialized either as base pages or as compound pages
+ * with vmemmap optimization enabled.  Devdax handles hwpoison simply:
+ * if a subpage of a compound page is poisoned, marking the compound
+ * head page is sufficient.
+ */
 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 		struct dev_pagemap *pgmap)
 {
-	struct page *page = pfn_to_page(pfn);
+	struct folio *folio = pfn_folio(pfn);
 	LIST_HEAD(to_kill);
 	dax_entry_t cookie;
 	int rc = 0;
 
 	/*
-	 * Pages instantiated by device-dax (not filesystem-dax)
-	 * may be compound pages.
-	 */
-	page = compound_head(page);
-
-	/*
 	 * Prevent the inode from being freed while we are interrogating
 	 * the address_space, typically this would be handled by
 	 * lock_page(), but dax pages do not use the page lock. This
 	 * also prevents changes to the mapping of this pfn until
 	 * poison signaling is complete.
 	 */
-	cookie = dax_lock_page(page);
+	cookie = dax_lock_folio(folio);
 	if (!cookie)
 		return -EBUSY;
 
-	if (hwpoison_filter(page)) {
+	if (hwpoison_filter(&folio->page)) {
 		rc = -EOPNOTSUPP;
 		goto unlock;
 	}
@@ -1760,7 +1763,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * Use this flag as an indication that the dax page has been
 	 * remapped UC to prevent speculative consumption of poison.
 	 */
-	SetPageHWPoison(page);
+	SetPageHWPoison(&folio->page);
 
 	/*
 	 * Unlike System-RAM there is no possibility to swap in a
@@ -1769,11 +1772,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * SIGBUS (i.e. MF_MUST_KILL)
 	 */
 	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
-	collect_procs(page, &to_kill, true);
+	collect_procs(&folio->page, &to_kill, true);
 
-	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
 unlock:
-	dax_unlock_page(page, cookie);
+	dax_unlock_folio(folio, cookie);
 	return rc;
 }
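As a footnote to the comment the patch adds above mf_generic_kill_procs(): a minimal illustrative sketch (hypothetical function name, not part of the patch) of why marking the compound head is enough for devdax. pfn_folio() resolves any subpage pfn to the head folio, so the poison flag always lands on the head page.

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Sketch only: poisoning via the head folio, as the comment describes. */
static void example_mark_devdax_poison(unsigned long pfn)
{
	struct folio *folio = pfn_folio(pfn);	/* head folio, even for a tail pfn */

	SetPageHWPoison(&folio->page);		/* flag is recorded on the head page */
}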