Diffstat (limited to 'mm/memory-failure.c')
| -rw-r--r-- | mm/memory-failure.c | 1338 |
1 file changed, 766 insertions(+), 572 deletions(-)
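Among other changes, the patch below replaces the old compile-time hwpoison filter knobs (`hwpoison_filter_enable`, `hwpoison_filter_dev_*`, etc.) with a runtime-registered, RCU-protected callback exposed through `hwpoison_filter_register()` / `hwpoison_filter_unregister()`. The following is a minimal, hypothetical sketch of how an error-injection module could plug into that interface. The `hwpoison_filter_func_t` signature (`int (struct page *)`) and its declaration in `<linux/memory-failure.h>` are inferred from the hunks below rather than taken from the header itself, and the PFN-window policy is purely illustrative.

```c
/* Hypothetical hwpoison filter module; assumes the interface shown in this diff. */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory-failure.h>	/* hwpoison_filter_register(), assumed prototype */

static unsigned long filter_pfn_start;
static unsigned long filter_pfn_end;
module_param(filter_pfn_start, ulong, 0644);
module_param(filter_pfn_end, ulong, 0644);

static int my_hwpoison_filter(struct page *p)
{
	unsigned long pfn = page_to_pfn(p);

	/*
	 * A non-zero return makes memory_failure() clear PG_hwpoison and
	 * bail out with -EOPNOTSUPP, i.e. the error event is skipped.
	 */
	if (pfn < filter_pfn_start || pfn >= filter_pfn_end)
		return -EINVAL;
	return 0;	/* inside the test window: let the handler proceed */
}

static int __init my_filter_init(void)
{
	hwpoison_filter_register(my_hwpoison_filter);
	return 0;
}

static void __exit my_filter_exit(void)
{
	/* Per the hunk below, unregistering synchronizes RCU before returning. */
	hwpoison_filter_unregister();
}

module_init(my_filter_init);
module_exit(my_filter_exit);
MODULE_DESCRIPTION("Sketch of a registration-based hwpoison filter");
MODULE_LICENSE("GPL");
```

Because the callback is dereferenced under `rcu_read_lock()` in `hwpoison_filter()`, the unregister path's `synchronize_rcu()` guarantees no filter invocation is still in flight when the module text goes away.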
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ece5d481b5ff..fbc5a01260c8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -38,8 +38,8 @@ #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/memory-failure.h> #include <linux/page-flags.h> -#include <linux/kernel-page-flags.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/dax.h> @@ -50,38 +50,43 @@ #include <linux/swap.h> #include <linux/backing-dev.h> #include <linux/migrate.h> -#include <linux/suspend.h> #include <linux/slab.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <linux/hugetlb.h> #include <linux/memory_hotplug.h> #include <linux/mm_inline.h> #include <linux/memremap.h> #include <linux/kfifo.h> #include <linux/ratelimit.h> -#include <linux/page-isolation.h> #include <linux/pagewalk.h> #include <linux/shmem_fs.h> #include <linux/sysctl.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/memory-failure.h> + #include "swap.h" #include "internal.h" -#include "ras/ras_event.h" static int sysctl_memory_failure_early_kill __read_mostly; static int sysctl_memory_failure_recovery __read_mostly = 1; +static int sysctl_enable_soft_offline __read_mostly = 1; + atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); static bool hw_memory_failure __read_mostly = false; -inline void num_poisoned_pages_inc(unsigned long pfn) +static DEFINE_MUTEX(mf_mutex); + +void num_poisoned_pages_inc(unsigned long pfn) { atomic_long_inc(&num_poisoned_pages); memblk_nr_poison_inc(pfn); } -inline void num_poisoned_pages_sub(unsigned long pfn, long i) +void num_poisoned_pages_sub(unsigned long pfn, long i) { atomic_long_sub(i, &num_poisoned_pages); if (pfn != -1UL) @@ -99,7 +104,7 @@ static ssize_t _name##_show(struct device *dev, \ { \ struct memory_failure_stats *mf_stats = \ &NODE_DATA(dev->id)->mf_stats; \ - return sprintf(buf, "%lu\n", mf_stats->_name); \ + return sysfs_emit(buf, "%lu\n", mf_stats->_name); \ } \ static DEVICE_ATTR_RO(_name) @@ -123,7 +128,7 @@ const struct attribute_group memory_failure_attr_group = { .attrs = memory_failure_attr, }; -static struct ctl_table memory_failure_table[] = { +static const struct ctl_table memory_failure_table[] = { { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, @@ -142,9 +147,21 @@ static struct ctl_table memory_failure_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, - { } + { + .procname = "enable_soft_offline", + .data = &sysctl_enable_soft_offline, + .maxlen = sizeof(sysctl_enable_soft_offline), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + } }; +static struct rb_root_cached pfn_space_itree = RB_ROOT_CACHED; + +static DEFINE_MUTEX(pfn_space_lock); + /* * Return values: * 1: the page is dissolved (if needed) and taken off from buddy, @@ -155,11 +172,23 @@ static int __page_handle_poison(struct page *page) { int ret; - zone_pcp_disable(page_zone(page)); - ret = dissolve_free_huge_page(page); - if (!ret) + /* + * zone_pcp_disable() can't be used here. It will + * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold + * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap + * optimization is enabled. This will break current lock dependency + * chain and leads to deadlock. + * Disabling pcp before dissolving the page was a deterministic + * approach because we made sure that those pages cannot end up in any + * PCP list. 
Draining PCP lists expels those pages to the buddy system, + * but nothing guarantees that those pages do not get back to a PCP + * queue if we need to refill those. + */ + ret = dissolve_free_hugetlb_folio(page_folio(page)); + if (!ret) { + drain_all_pages(page_zone(page)); ret = take_page_off_buddy(page); - zone_pcp_enable(page_zone(page)); + } return ret; } @@ -168,8 +197,8 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo { if (hugepage_or_freepage) { /* - * Doing this check for free pages is also fine since dissolve_free_huge_page - * returns 0 for non-hugetlb pages as well. + * Doing this check for free pages is also fine since + * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well. */ if (__page_handle_poison(page) <= 0) /* @@ -191,106 +220,34 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo return true; } -#if IS_ENABLED(CONFIG_HWPOISON_INJECT) - -u32 hwpoison_filter_enable = 0; -u32 hwpoison_filter_dev_major = ~0U; -u32 hwpoison_filter_dev_minor = ~0U; -u64 hwpoison_filter_flags_mask; -u64 hwpoison_filter_flags_value; -EXPORT_SYMBOL_GPL(hwpoison_filter_enable); -EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); -EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); -EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); -EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); +static hwpoison_filter_func_t __rcu *hwpoison_filter_func __read_mostly; -static int hwpoison_filter_dev(struct page *p) +void hwpoison_filter_register(hwpoison_filter_func_t *filter) { - struct address_space *mapping; - dev_t dev; - - if (hwpoison_filter_dev_major == ~0U && - hwpoison_filter_dev_minor == ~0U) - return 0; - - mapping = page_mapping(p); - if (mapping == NULL || mapping->host == NULL) - return -EINVAL; - - dev = mapping->host->i_sb->s_dev; - if (hwpoison_filter_dev_major != ~0U && - hwpoison_filter_dev_major != MAJOR(dev)) - return -EINVAL; - if (hwpoison_filter_dev_minor != ~0U && - hwpoison_filter_dev_minor != MINOR(dev)) - return -EINVAL; - - return 0; + rcu_assign_pointer(hwpoison_filter_func, filter); } +EXPORT_SYMBOL_GPL(hwpoison_filter_register); -static int hwpoison_filter_flags(struct page *p) +void hwpoison_filter_unregister(void) { - if (!hwpoison_filter_flags_mask) - return 0; - - if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == - hwpoison_filter_flags_value) - return 0; - else - return -EINVAL; + RCU_INIT_POINTER(hwpoison_filter_func, NULL); + synchronize_rcu(); } +EXPORT_SYMBOL_GPL(hwpoison_filter_unregister); -/* - * This allows stress tests to limit test scope to a collection of tasks - * by putting them under some memcg. This prevents killing unrelated/important - * processes such as /sbin/init. Note that the target task may share clean - * pages with init (eg. libc text), which is harmless. If the target task - * share _dirty_ pages with another task B, the test scheme must make sure B - * is also included in the memcg. At last, due to race conditions this filter - * can only guarantee that the page either belongs to the memcg tasks, or is - * a freed page. 
- */ -#ifdef CONFIG_MEMCG -u64 hwpoison_filter_memcg; -EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); -static int hwpoison_filter_task(struct page *p) -{ - if (!hwpoison_filter_memcg) - return 0; - - if (page_cgroup_ino(p) != hwpoison_filter_memcg) - return -EINVAL; - - return 0; -} -#else -static int hwpoison_filter_task(struct page *p) { return 0; } -#endif - -int hwpoison_filter(struct page *p) +static int hwpoison_filter(struct page *p) { - if (!hwpoison_filter_enable) - return 0; - - if (hwpoison_filter_dev(p)) - return -EINVAL; - - if (hwpoison_filter_flags(p)) - return -EINVAL; + int ret = 0; + hwpoison_filter_func_t *filter; - if (hwpoison_filter_task(p)) - return -EINVAL; + rcu_read_lock(); + filter = rcu_dereference(hwpoison_filter_func); + if (filter) + ret = filter(p); + rcu_read_unlock(); - return 0; -} -#else -int hwpoison_filter(struct page *p) -{ - return 0; + return ret; } -#endif - -EXPORT_SYMBOL_GPL(hwpoison_filter); /* * Kill all processes that have a poisoned page mapped and then isolate @@ -333,7 +290,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) int ret = 0; pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", - pfn, t->comm, t->pid); + pfn, t->comm, task_pid_nr(t)); if ((flags & MF_ACTION_REQUIRED) && (t == current)) ret = force_sig_mceerr(BUS_MCEERR_AR, @@ -344,14 +301,12 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) * PF_MCE_EARLY set. * Don't use force here, it's convenient if the signal * can be temporarily blocked. - * This could cause a loop when the user sets SIGBUS - * to SIG_IGN, but hopefully no one will do that? */ ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr, addr_lsb, t); if (ret < 0) pr_info("Error sending signal to %s:%d: %d\n", - t->comm, t->pid, ret); + t->comm, task_pid_nr(t), ret); return ret; } @@ -359,23 +314,25 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) * Unknown page type encountered. Try to check whether it can turn PageLRU by * lru_add_drain_all. */ -void shake_page(struct page *p) +void shake_folio(struct folio *folio) { - if (PageHuge(p)) + if (folio_test_hugetlb(folio)) return; - - if (!PageSlab(p)) { - lru_add_drain_all(); - if (PageLRU(p) || is_free_buddy_page(p)) - return; - } - /* * TODO: Could shrink slab caches here if a lightweight range-based * shrinker will be available. */ + if (folio_test_slab(folio)) + return; + + lru_add_drain_all(); +} +EXPORT_SYMBOL_GPL(shake_folio); + +static void shake_page(struct page *page) +{ + shake_folio(page_folio(page)); } -EXPORT_SYMBOL_GPL(shake_page); static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, unsigned long address) @@ -398,18 +355,18 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; - if (pud_devmap(*pud)) + if (pud_trans_huge(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; - if (pmd_devmap(*pmd)) + if (pmd_trans_huge(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte) return 0; ptent = ptep_get(pte); - if (pte_present(ptent) && pte_devmap(ptent)) + if (pte_present(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; @@ -420,21 +377,13 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, * not much we can do. We just print a message and ignore otherwise. */ -#define FSDAX_INVALID_PGOFF ULONG_MAX - /* * Schedule a process for later kill. 
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. - * - * Note: @fsdax_pgoff is used only when @p is a fsdax page and a - * filesystem with a memory failure handler has claimed the - * memory_failure event. In all other cases, page->index and - * page->mapping are sufficient for mapping the page back to its - * corresponding user virtual address. */ -static void __add_to_kill(struct task_struct *tsk, struct page *p, +static void __add_to_kill(struct task_struct *tsk, const struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, - unsigned long ksm_addr, pgoff_t fsdax_pgoff) + unsigned long addr) { struct to_kill *tk; @@ -444,13 +393,11 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p, return; } - tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma); - if (is_zone_device_page(p)) { - if (fsdax_pgoff != FSDAX_INVALID_PGOFF) - tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma); + tk->addr = addr; + if (is_zone_device_page(p)) tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); - } else - tk->size_shift = page_shift(compound_head(p)); + else + tk->size_shift = folio_shift(page_folio(p)); /* * Send SIGKILL if "tk->addr == -EFAULT". Also, as @@ -475,11 +422,13 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p, list_add_tail(&tk->nd, to_kill); } -static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p, - struct vm_area_struct *vma, - struct list_head *to_kill) +static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p, + struct vm_area_struct *vma, struct list_head *to_kill, + unsigned long addr) { - __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF); + if (addr == -EFAULT) + return; + __add_to_kill(tsk, p, vma, to_kill, addr); } #ifdef CONFIG_KSM @@ -495,12 +444,13 @@ static bool task_in_to_kill_list(struct list_head *to_kill, return false; } -void add_to_kill_ksm(struct task_struct *tsk, struct page *p, + +void add_to_kill_ksm(struct task_struct *tsk, const struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, - unsigned long ksm_addr) + unsigned long addr) { if (!task_in_to_kill_list(to_kill, tsk)) - __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF); + __add_to_kill(tsk, p, vma, to_kill, addr); } #endif /* @@ -508,24 +458,17 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p, * * Only do anything when FORCEKILL is set, otherwise just free the * list (this is used for clean pages which do not need killing) - * Also when FAIL is set do a force kill because something went - * wrong earlier. */ -static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, +static void kill_procs(struct list_head *to_kill, int forcekill, unsigned long pfn, int flags) { struct to_kill *tk, *next; list_for_each_entry_safe(tk, next, to_kill, nd) { if (forcekill) { - /* - * In case something went wrong with munmapping - * make sure the process doesn't catch the - * signal and then access the memory. Just kill it. 
- */ - if (fail || tk->addr == -EFAULT) { + if (tk->addr == -EFAULT) { pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", - pfn, tk->tsk->comm, tk->tsk->pid); + pfn, tk->tsk->comm, task_pid_nr(tk->tsk)); do_send_sig_info(SIGKILL, SEND_SIG_PRIV, tk->tsk, PIDTYPE_PID); } @@ -538,7 +481,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, */ else if (kill_proc(tk, pfn, flags) < 0) pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n", - pfn, tk->tsk->comm, tk->tsk->pid); + pfn, tk->tsk->comm, task_pid_nr(tk->tsk)); } list_del(&tk->nd); put_task_struct(tk->tsk); @@ -551,8 +494,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, * on behalf of the thread group. Return task_struct of the (first found) * dedicated thread if found, and return NULL otherwise. * - * We already hold read_lock(&tasklist_lock) in the caller, so we don't - * have to call rcu_read_lock/unlock() in this function. + * We already hold rcu lock in the caller, so we don't have to call + * rcu_read_lock/unlock() in this function. */ static struct task_struct *find_early_kill_thread(struct task_struct *tsk) { @@ -599,11 +542,10 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early) /* * Collect processes when the error hit an anonymous page. */ -static void collect_procs_anon(struct page *page, struct list_head *to_kill, - int force_early) +static void collect_procs_anon(const struct folio *folio, + const struct page *page, struct list_head *to_kill, + int force_early) { - struct folio *folio = page_folio(page); - struct vm_area_struct *vma; struct task_struct *tsk; struct anon_vma *av; pgoff_t pgoff; @@ -612,11 +554,13 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, if (av == NULL) /* Not actually mapped anymore */ return; - pgoff = page_to_pgoff(page); - read_lock(&tasklist_lock); - for_each_process (tsk) { + pgoff = page_pgoff(folio, page); + rcu_read_lock(); + for_each_process(tsk) { + struct vm_area_struct *vma; struct anon_vma_chain *vmac; struct task_struct *t = task_early_kill(tsk, force_early); + unsigned long addr; if (!t) continue; @@ -625,31 +569,32 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, vma = vmac->vma; if (vma->vm_mm != t->mm) continue; - if (!page_mapped_in_vma(page, vma)) - continue; - add_to_kill_anon_file(t, page, vma, to_kill); + addr = page_mapped_in_vma(page, vma); + add_to_kill_anon_file(t, page, vma, to_kill, addr); } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); anon_vma_unlock_read(av); } /* * Collect processes when the error hit a file mapped page. 
*/ -static void collect_procs_file(struct page *page, struct list_head *to_kill, - int force_early) +static void collect_procs_file(const struct folio *folio, + const struct page *page, struct list_head *to_kill, + int force_early) { struct vm_area_struct *vma; struct task_struct *tsk; - struct address_space *mapping = page->mapping; + struct address_space *mapping = folio->mapping; pgoff_t pgoff; i_mmap_lock_read(mapping); - read_lock(&tasklist_lock); - pgoff = page_to_pgoff(page); + rcu_read_lock(); + pgoff = page_pgoff(folio, page); for_each_process(tsk) { struct task_struct *t = task_early_kill(tsk, force_early); + unsigned long addr; if (!t) continue; @@ -658,41 +603,51 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, /* * Send early kill signal to tasks where a vma covers * the page but the corrupted page is not necessarily - * mapped it in its pte. + * mapped in its pte. * Assume applications who requested early kill want * to be informed of all such data corruptions. */ - if (vma->vm_mm == t->mm) - add_to_kill_anon_file(t, page, vma, to_kill); + if (vma->vm_mm != t->mm) + continue; + addr = page_address_in_vma(folio, page, vma); + add_to_kill_anon_file(t, page, vma, to_kill, addr); } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); i_mmap_unlock_read(mapping); } #ifdef CONFIG_FS_DAX -static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p, +static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, pgoff_t pgoff) { - __add_to_kill(tsk, p, vma, to_kill, 0, pgoff); + unsigned long addr = vma_address(vma, pgoff, 1); + __add_to_kill(tsk, p, vma, to_kill, addr); } /* * Collect processes when the error hit a fsdax page. */ -static void collect_procs_fsdax(struct page *page, +static void collect_procs_fsdax(const struct page *page, struct address_space *mapping, pgoff_t pgoff, - struct list_head *to_kill) + struct list_head *to_kill, bool pre_remove) { struct vm_area_struct *vma; struct task_struct *tsk; i_mmap_lock_read(mapping); - read_lock(&tasklist_lock); + rcu_read_lock(); for_each_process(tsk) { - struct task_struct *t = task_early_kill(tsk, true); + struct task_struct *t = tsk; + /* + * Search for all tasks while MF_MEM_PRE_REMOVE is set, because + * the current may not be the one accessing the fsdax page. + * Otherwise, search for the current task. + */ + if (!pre_remove) + t = task_early_kill(tsk, true); if (!t) continue; vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { @@ -700,7 +655,7 @@ static void collect_procs_fsdax(struct page *page, add_to_kill_fsdax(t, page, vma, to_kill, pgoff); } } - read_unlock(&tasklist_lock); + rcu_read_unlock(); i_mmap_unlock_read(mapping); } #endif /* CONFIG_FS_DAX */ @@ -708,20 +663,20 @@ static void collect_procs_fsdax(struct page *page, /* * Collect the processes who have the corrupted page mapped to kill. 
*/ -static void collect_procs(struct page *page, struct list_head *tokill, - int force_early) +static void collect_procs(const struct folio *folio, const struct page *page, + struct list_head *tokill, int force_early) { - if (!page->mapping) + if (!folio->mapping) return; - if (unlikely(PageKsm(page))) - collect_procs_ksm(page, tokill, force_early); - else if (PageAnon(page)) - collect_procs_anon(page, tokill, force_early); + if (unlikely(folio_test_ksm(folio))) + collect_procs_ksm(folio, page, tokill, force_early); + else if (folio_test_anon(folio)) + collect_procs_anon(folio, page, tokill, force_early); else - collect_procs_file(page, tokill, force_early); + collect_procs_file(folio, page, tokill, force_early); } -struct hwp_walk { +struct hwpoison_walk { struct to_kill tk; unsigned long pfn; int flags; @@ -741,10 +696,10 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, if (pte_present(pte)) { pfn = pte_pfn(pte); } else { - swp_entry_t swp = pte_to_swp_entry(pte); + const softleaf_t entry = softleaf_from_pte(pte); - if (is_hwpoison_entry(swp)) - pfn = swp_offset_pfn(swp); + if (softleaf_is_hwpoison(entry)) + pfn = softleaf_to_pfn(entry); } if (!pfn || pfn != poisoned_pfn) @@ -756,7 +711,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, #ifdef CONFIG_TRANSPARENT_HUGEPAGE static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, - struct hwp_walk *hwp) + struct hwpoison_walk *hwp) { pmd_t pmd = *pmdp; unsigned long pfn; @@ -774,7 +729,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, } #else static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, - struct hwp_walk *hwp) + struct hwpoison_walk *hwp) { return 0; } @@ -783,7 +738,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct hwp_walk *hwp = walk->private; + struct hwpoison_walk *hwp = walk->private; int ret = 0; pte_t *ptep, *mapped_pte; spinlock_t *ptl; @@ -817,20 +772,35 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct hwp_walk *hwp = walk->private; - pte_t pte = huge_ptep_get(ptep); + struct hwpoison_walk *hwp = walk->private; struct hstate *h = hstate_vma(walk->vma); + spinlock_t *ptl; + pte_t pte; + int ret; - return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), - hwp->pfn, &hwp->tk); + ptl = huge_pte_lock(h, walk->mm, ptep); + pte = huge_ptep_get(walk->mm, addr, ptep); + ret = check_hwpoisoned_entry(pte, addr, huge_page_shift(h), + hwp->pfn, &hwp->tk); + spin_unlock(ptl); + return ret; } #else #define hwpoison_hugetlb_range NULL #endif -static const struct mm_walk_ops hwp_walk_ops = { +static int hwpoison_test_walk(unsigned long start, unsigned long end, + struct mm_walk *walk) +{ + /* We also want to consider pages mapped into VM_PFNMAP. 
*/ + return 0; +} + +static const struct mm_walk_ops hwpoison_walk_ops = { .pmd_entry = hwpoison_pte_range, .hugetlb_entry = hwpoison_hugetlb_range, + .test_walk = hwpoison_test_walk, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -850,7 +820,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, int flags) { int ret; - struct hwp_walk priv = { + struct hwpoison_walk priv = { .pfn = pfn, }; priv.tk.tsk = p; @@ -859,16 +829,43 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, return -EFAULT; mmap_read_lock(p->mm); - ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops, + ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, (void *)&priv); + /* + * ret = 1 when CMCI wins, regardless of whether try_to_unmap() + * succeeds or fails, then kill the process with SIGBUS. + * ret = 0 when poison page is a clean page and it's dropped, no + * SIGBUS is needed. + */ if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); - else - ret = 0; mmap_read_unlock(p->mm); - return ret > 0 ? -EHWPOISON : -EFAULT; + + return ret > 0 ? -EHWPOISON : 0; } +/* + * MF_IGNORED - The m-f() handler marks the page as PG_hwpoisoned'ed. + * But it could not do more to isolate the page from being accessed again, + * nor does it kill the process. This is extremely rare and one of the + * potential causes is that the page state has been changed due to + * underlying race condition. This is the most severe outcomes. + * + * MF_FAILED - The m-f() handler marks the page as PG_hwpoisoned'ed. + * It should have killed the process, but it can't isolate the page, + * due to conditions such as extra pin, unmap failure, etc. Accessing + * the page again may trigger another MCE and the process will be killed + * by the m-f() handler immediately. + * + * MF_DELAYED - The m-f() handler marks the page as PG_hwpoisoned'ed. + * The page is unmapped, and is removed from the LRU or file mapping. + * An attempt to access the page again will trigger page fault and the + * PF handler will kill the process. + * + * MF_RECOVERED - The m-f() handler marks the page as PG_hwpoisoned'ed. + * The page has been completely isolated, that is, unmapped, taken out of + * the buddy system, or hole-punnched out of the file mapping. + */ static const char *action_name[] = { [MF_IGNORED] = "Ignored", [MF_FAILED] = "Failed", @@ -879,10 +876,9 @@ static const char *action_name[] = { static const char * const action_page_types[] = { [MF_MSG_KERNEL] = "reserved kernel page", [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", - [MF_MSG_SLAB] = "kernel slab page", - [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", [MF_MSG_HUGE] = "huge page", [MF_MSG_FREE_HUGE] = "free huge page", + [MF_MSG_GET_HWPOISON] = "get hwpoison page", [MF_MSG_UNMAP_FAILED] = "unmapping failed page", [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", @@ -896,6 +892,8 @@ static const char * const action_page_types[] = { [MF_MSG_BUDDY] = "free buddy page", [MF_MSG_DAX] = "dax page", [MF_MSG_UNSPLIT_THP] = "unsplit thp", + [MF_MSG_ALREADY_POISONED] = "already poisoned page", + [MF_MSG_PFN_MAP] = "non struct page pfn", [MF_MSG_UNKNOWN] = "unknown page", }; @@ -905,54 +903,51 @@ static const char * const action_page_types[] = { * The page count will stop it from being freed by unpoison. * Stress tests should be aware of this memory leak problem. 
*/ -static int delete_from_lru_cache(struct page *p) +static int delete_from_lru_cache(struct folio *folio) { - if (isolate_lru_page(p)) { + if (folio_isolate_lru(folio)) { /* * Clear sensible page flags, so that the buddy system won't - * complain when the page is unpoison-and-freed. + * complain when the folio is unpoison-and-freed. */ - ClearPageActive(p); - ClearPageUnevictable(p); + folio_clear_active(folio); + folio_clear_unevictable(folio); /* * Poisoned page might never drop its ref count to 0 so we have * to uncharge it manually from its memcg. */ - mem_cgroup_uncharge(page_folio(p)); + mem_cgroup_uncharge(folio); /* - * drop the page count elevated by isolate_lru_page() + * drop the refcount elevated by folio_isolate_lru() */ - put_page(p); + folio_put(folio); return 0; } return -EIO; } -static int truncate_error_page(struct page *p, unsigned long pfn, +static int truncate_error_folio(struct folio *folio, unsigned long pfn, struct address_space *mapping) { int ret = MF_FAILED; - if (mapping->a_ops->error_remove_page) { - struct folio *folio = page_folio(p); - int err = mapping->a_ops->error_remove_page(mapping, p); + if (mapping->a_ops->error_remove_folio) { + int err = mapping->a_ops->error_remove_folio(mapping, folio); - if (err != 0) { + if (err != 0) pr_info("%#lx: Failed to punch page: %d\n", pfn, err); - } else if (folio_has_private(folio) && - !filemap_release_folio(folio, GFP_NOIO)) { + else if (!filemap_release_folio(folio, GFP_NOIO)) pr_info("%#lx: failed to release buffers\n", pfn); - } else { + else ret = MF_RECOVERED; - } } else { /* * If the file system doesn't support it just invalidate * This fails on dirty or anything with private pages */ - if (invalidate_inode_page(p)) + if (mapping_evict_folio(mapping, folio)) ret = MF_RECOVERED; else pr_info("%#lx: Failed to invalidate\n", pfn); @@ -982,7 +977,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p, int count = page_count(p) - 1; if (extra_pins) - count -= 1; + count -= folio_nr_pages(page_folio(p)); if (count > 0) { pr_err("%#lx: %s still referenced by %d users\n", @@ -1006,12 +1001,13 @@ static int me_kernel(struct page_state *ps, struct page *p) /* * Page in unknown state. Do nothing. + * This is a catch-all in case we fail to make sense of the page state. */ static int me_unknown(struct page_state *ps, struct page *p) { pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); unlock_page(p); - return MF_FAILED; + return MF_IGNORED; } /* @@ -1019,17 +1015,18 @@ static int me_unknown(struct page_state *ps, struct page *p) */ static int me_pagecache_clean(struct page_state *ps, struct page *p) { + struct folio *folio = page_folio(p); int ret; struct address_space *mapping; bool extra_pins; - delete_from_lru_cache(p); + delete_from_lru_cache(folio); /* - * For anonymous pages we're done the only reference left + * For anonymous folios the only reference left * should be the one m_f() holds. */ - if (PageAnon(p)) { + if (folio_test_anon(folio)) { ret = MF_RECOVERED; goto out; } @@ -1041,11 +1038,9 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p) * has a reference, because it could be file system metadata * and that's not safe to truncate. 
*/ - mapping = page_mapping(p); + mapping = folio_mapping(folio); if (!mapping) { - /* - * Page has been teared down in the meanwhile - */ + /* Folio has been torn down in the meantime */ ret = MF_FAILED; goto out; } @@ -1061,12 +1056,12 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p) * * Open: to take i_rwsem or not for this? Right now we don't. */ - ret = truncate_error_page(p, page_to_pfn(p), mapping); + ret = truncate_error_folio(folio, page_to_pfn(p), mapping); if (has_extra_refcount(ps, p, extra_pins)) ret = MF_FAILED; out: - unlock_page(p); + folio_unlock(folio); return ret; } @@ -1078,9 +1073,9 @@ out: */ static int me_pagecache_dirty(struct page_state *ps, struct page *p) { - struct address_space *mapping = page_mapping(p); + struct folio *folio = page_folio(p); + struct address_space *mapping = folio_mapping(folio); - SetPageError(p); /* TBD: print more information about the file. */ if (mapping) { /* @@ -1088,34 +1083,6 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p) * who check the mapping. * This way the application knows that something went * wrong with its dirty file data. - * - * There's one open issue: - * - * The EIO will be only reported on the next IO - * operation and then cleared through the IO map. - * Normally Linux has two mechanisms to pass IO error - * first through the AS_EIO flag in the address space - * and then through the PageError flag in the page. - * Since we drop pages on memory failure handling the - * only mechanism open to use is through AS_AIO. - * - * This has the disadvantage that it gets cleared on - * the first operation that returns an error, while - * the PageError bit is more sticky and only cleared - * when the page is reread or dropped. If an - * application assumes it will always get error on - * fsync, but does other operations on the fd before - * and the page is dropped between then the error - * will not be properly reported. - * - * This can already happen even without hwpoisoned - * pages: first on metadata IO errors (which only - * report through AS_EIO) or when the page is dropped - * at the wrong time. - * - * So right now we assume that the application DTRT on - * the first EIO, but we're not worse than other parts - * of the kernel. */ mapping_set_error(mapping, -EIO); } @@ -1127,7 +1094,7 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p) * Clean and dirty swap cache. * * Dirty swap cache page is tricky to handle. The page could live both in page - * cache and swap cache(ie. page is freshly swapped in). So it could be + * table and swap cache(ie. page is freshly swapped in). So it could be * referenced concurrently by 2 types of PTEs: * normal PTEs and swap PTEs. We try to handle them consistently by calling * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs, @@ -1144,15 +1111,16 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p) */ static int me_swapcache_dirty(struct page_state *ps, struct page *p) { + struct folio *folio = page_folio(p); int ret; bool extra_pins = false; - ClearPageDirty(p); + folio_clear_dirty(folio); /* Trigger EIO in shmem: */ - ClearPageUptodate(p); + folio_clear_uptodate(folio); - ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED; - unlock_page(p); + ret = delete_from_lru_cache(folio) ? 
MF_FAILED : MF_DELAYED; + folio_unlock(folio); if (ret == MF_DELAYED) extra_pins = true; @@ -1168,9 +1136,9 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p) struct folio *folio = page_folio(p); int ret; - delete_from_swap_cache(folio); + swap_cache_del_folio(folio); - ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED; + ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED; folio_unlock(folio); if (has_extra_refcount(ps, p, false)) @@ -1187,29 +1155,26 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p) */ static int me_huge_page(struct page_state *ps, struct page *p) { + struct folio *folio = page_folio(p); int res; - struct page *hpage = compound_head(p); struct address_space *mapping; bool extra_pins = false; - if (!PageHuge(hpage)) - return MF_DELAYED; - - mapping = page_mapping(hpage); + mapping = folio_mapping(folio); if (mapping) { - res = truncate_error_page(hpage, page_to_pfn(p), mapping); + res = truncate_error_folio(folio, page_to_pfn(p), mapping); /* The page is kept in page cache. */ extra_pins = true; - unlock_page(hpage); + folio_unlock(folio); } else { - unlock_page(hpage); + folio_unlock(folio); /* * migration entry prevents later access on error hugepage, * so we can free and dissolve it into buddy to save healthy * subpages. */ - put_page(hpage); - if (__page_handle_poison(p) >= 0) { + folio_put(folio); + if (__page_handle_poison(p) > 0) { page_ref_inc(p); res = MF_RECOVERED; } else { @@ -1242,7 +1207,6 @@ static int me_huge_page(struct page_state *ps, struct page *p) #define mlock (1UL << PG_mlocked) #define lru (1UL << PG_lru) #define head (1UL << PG_head) -#define slab (1UL << PG_slab) #define reserved (1UL << PG_reserved) static struct page_state error_states[] = { @@ -1252,13 +1216,6 @@ static struct page_state error_states[] = { * PG_buddy pages only make a small fraction of all free pages. */ - /* - * Could in theory check if slab page is free or if we can drop - * currently unused objects without touching them. But just - * treat it as standard kernel for now. 
- */ - { slab, slab, MF_MSG_SLAB, me_kernel }, - { head, head, MF_MSG_HUGE, me_huge_page }, { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, @@ -1285,7 +1242,6 @@ static struct page_state error_states[] = { #undef mlock #undef lru #undef head -#undef slab #undef reserved static void update_per_node_mf_stats(unsigned long pfn, @@ -1330,9 +1286,10 @@ static int action_result(unsigned long pfn, enum mf_action_page_type type, { trace_memory_failure_event(pfn, type, result); - num_poisoned_pages_inc(pfn); - - update_per_node_mf_stats(pfn, result); + if (type != MF_MSG_ALREADY_POISONED && type != MF_MSG_PFN_MAP) { + num_poisoned_pages_inc(pfn); + update_per_node_mf_stats(pfn, result); + } pr_err("%#lx: recovery action for %s: %s\n", pfn, action_page_types[type], action_name[result]); @@ -1380,8 +1337,11 @@ void ClearPageHWPoisonTakenOff(struct page *page) */ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) { - /* Soft offline could migrate non-LRU movable pages */ - if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) + if (PageSlab(page)) + return false; + + /* Soft offline could migrate movable_ops pages */ + if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page)) return true; return PageLRU(page) || is_free_buddy_page(page); @@ -1394,8 +1354,15 @@ static int __get_hwpoison_page(struct page *page, unsigned long flags) bool hugetlb = false; ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false); - if (hugetlb) - return ret; + if (hugetlb) { + /* Make sure hugetlb demotion did not happen from under us. */ + if (folio == page_folio(page)) + return ret; + if (ret > 0) { + folio_put(folio); + folio = page_folio(page); + } + } /* * This check prevents from calling folio_try_get() for any @@ -1416,6 +1383,8 @@ static int __get_hwpoison_page(struct page *page, unsigned long flags) return 0; } +#define GET_PAGE_MAX_RETRY_NUM 3 + static int get_any_page(struct page *p, unsigned long flags) { int ret = 0, pass = 0; @@ -1430,12 +1399,12 @@ try_again: if (!ret) { if (page_count(p)) { /* We raced with an allocation, retry. */ - if (pass++ < 3) + if (pass++ < GET_PAGE_MAX_RETRY_NUM) goto try_again; ret = -EBUSY; } else if (!PageHuge(p) && !is_free_buddy_page(p)) { /* We raced with put_page, retry. */ - if (pass++ < 3) + if (pass++ < GET_PAGE_MAX_RETRY_NUM) goto try_again; ret = -EIO; } @@ -1461,7 +1430,7 @@ try_again: * A page we cannot handle. Check whether we can turn * it into something we can handle. */ - if (pass++ < 3) { + if (pass++ < GET_PAGE_MAX_RETRY_NUM) { put_page(p); shake_page(p); count_increased = false; @@ -1484,8 +1453,13 @@ static int __get_unpoison_page(struct page *page) bool hugetlb = false; ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true); - if (hugetlb) - return ret; + if (hugetlb) { + /* Make sure hugetlb demotion did not happen from under us. */ + if (folio == page_folio(page)) + return ret; + if (ret > 0) + folio_put(folio); + } /* * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison, @@ -1518,7 +1492,7 @@ static int __get_unpoison_page(struct page *page) * the given page has PG_hwpoison. So it's never reused for other page * allocations, and __get_unpoison_page() never races with them. 
* - * Return: 0 on failure, + * Return: 0 on failure or free buddy (hugetlb) page, * 1 on success for in-use pages in a well-defined state, * -EIO for pages on which we can not handle memory errors, * -EBUSY when get_hwpoison_page() has raced with page lifecycle @@ -1540,37 +1514,15 @@ static int get_hwpoison_page(struct page *p, unsigned long flags) } /* - * Do all that is necessary to remove user space mappings. Unmap - * the pages and send SIGBUS to the processes if the data was dirty. + * The caller must guarantee the folio isn't large folio, except hugetlb. + * try_to_unmap() can't handle it. */ -static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, - int flags, struct page *hpage) +int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { - struct folio *folio = page_folio(hpage); enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; struct address_space *mapping; - LIST_HEAD(tokill); - bool unmap_success; - int forcekill; - bool mlocked = PageMlocked(hpage); - - /* - * Here we are interested only in user-mapped pages, so skip any - * other types of pages. - */ - if (PageReserved(p) || PageSlab(p) || PageTable(p)) - return true; - if (!(PageLRU(hpage) || PageHuge(p))) - return true; - /* - * This check implies we don't kill processes if their pages - * are in the swap cache early. Those are always late kills. - */ - if (!page_mapped(hpage)) - return true; - - if (PageSwapCache(p)) { + if (folio_test_swapcache(folio)) { pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); ttu &= ~TTU_HWPOISON; } @@ -1581,11 +1533,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, * XXX: the dirty test could be racy: set_page_dirty() may not always * be called inside page lock (it's recommended but not enforced). */ - mapping = page_mapping(hpage); - if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && + mapping = folio_mapping(folio); + if (!must_kill && !folio_test_dirty(folio) && mapping && mapping_can_writeback(mapping)) { - if (page_mkclean(hpage)) { - SetPageDirty(hpage); + if (folio_mkclean(folio)) { + folio_set_dirty(folio); } else { ttu &= ~TTU_HWPOISON; pr_info("%#lx: corrupted page was clean: dropped without side effects\n", @@ -1593,42 +1545,77 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, } } - /* - * First collect all the processes that have the page - * mapped in dirty form. This has to be done before try_to_unmap, - * because ttu takes the rmap data structures down. - */ - collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); - - if (PageHuge(hpage) && !PageAnon(hpage)) { + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { /* - * For hugetlb pages in shared mappings, try_to_unmap + * For hugetlb folios in shared mappings, try_to_unmap * could potentially call huge_pmd_unshare. Because of * this, take semaphore in write mode here and set * TTU_RMAP_LOCKED to indicate we have taken the lock * at this higher level. 
*/ - mapping = hugetlb_page_mapping_lock_write(hpage); - if (mapping) { - try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); - i_mmap_unlock_write(mapping); - } else - pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); + mapping = hugetlb_folio_mapping_lock_write(folio); + if (!mapping) { + pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n", + folio_pfn(folio)); + return -EBUSY; + } + + try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); + i_mmap_unlock_write(mapping); } else { try_to_unmap(folio, ttu); } - unmap_success = !page_mapped(hpage); + return folio_mapped(folio) ? -EBUSY : 0; +} + +/* + * Do all that is necessary to remove user space mappings. Unmap + * the pages and send SIGBUS to the processes if the data was dirty. + */ +static bool hwpoison_user_mappings(struct folio *folio, struct page *p, + unsigned long pfn, int flags) +{ + LIST_HEAD(tokill); + bool unmap_success; + int forcekill; + bool mlocked = folio_test_mlocked(folio); + + /* + * Here we are interested only in user-mapped pages, so skip any + * other types of pages. + */ + if (folio_test_reserved(folio) || folio_test_slab(folio) || + folio_test_pgtable(folio) || folio_test_offline(folio)) + return true; + if (!(folio_test_lru(folio) || folio_test_hugetlb(folio))) + return true; + + /* + * This check implies we don't kill processes if their pages + * are in the swap cache early. Those are always late kills. + */ + if (!folio_mapped(folio)) + return true; + + /* + * First collect all the processes that have the page + * mapped in dirty form. This has to be done before try_to_unmap, + * because ttu takes the rmap data structures down. + */ + collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); + + unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL); if (!unmap_success) - pr_err("%#lx: failed to unmap page (mapcount=%d)\n", - pfn, page_mapcount(hpage)); + pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n", + pfn, folio_mapcount(folio)); /* * try_to_unmap() might put mlocked page in lru cache, so call * shake_page() again to ensure that it's flushed. */ if (mlocked) - shake_page(hpage); + shake_folio(folio); /* * Now that the dirty bit has been propagated to the @@ -1640,9 +1627,9 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, * use a more force-full uncatchable kill to prevent * any accesses to the poisoned memory. */ - forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) || + forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) || !unmap_success; - kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); + kill_procs(&tokill, forcekill, pfn, flags); return unmap_success; } @@ -1658,10 +1645,10 @@ static int identify_page_state(unsigned long pfn, struct page *p, * carried out only if the first check can't determine the page status. */ for (ps = error_states;; ps++) - if ((p->flags & ps->mask) == ps->res) + if ((p->flags.f & ps->mask) == ps->res) break; - page_flags |= (p->flags & (1UL << PG_dirty)); + page_flags |= (p->flags.f & (1UL << PG_dirty)); if (!ps->mask) for (ps = error_states;; ps++) @@ -1670,15 +1657,21 @@ static int identify_page_state(unsigned long pfn, struct page *p, return page_action(ps, p, pfn); } -static int try_to_split_thp_page(struct page *page) +/* + * When 'release' is 'false', it means that if thp split has failed, + * there is still more to do, hence the page refcount we took earlier + * is still needed. 
+ */ +static int try_to_split_thp_page(struct page *page, unsigned int new_order, + bool release) { int ret; lock_page(page); - ret = split_huge_page(page); + ret = split_huge_page_to_order(page, new_order); unlock_page(page); - if (unlikely(ret)) + if (ret && release) put_page(page); return ret; @@ -1701,40 +1694,43 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, * mapping being torn down is communicated in siginfo, see * kill_proc() */ - loff_t start = (index << PAGE_SHIFT) & ~(size - 1); + loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1); unmap_mapping_range(mapping, start, size, 0); } - kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); + kill_procs(to_kill, flags & MF_MUST_KILL, pfn, flags); } +/* + * Only dev_pagemap pages get here, such as fsdax when the filesystem + * either do not claim or fails to claim a hwpoison event, or devdax. + * The fsdax pages are initialized per base page, and the devdax pages + * could be initialized either as base pages, or as compound pages with + * vmemmap optimization enabled. Devdax is simplistic in its dealing with + * hwpoison, such that, if a subpage of a compound page is poisoned, + * simply mark the compound head page is by far sufficient. + */ static int mf_generic_kill_procs(unsigned long long pfn, int flags, struct dev_pagemap *pgmap) { - struct page *page = pfn_to_page(pfn); + struct folio *folio = pfn_folio(pfn); LIST_HEAD(to_kill); dax_entry_t cookie; int rc = 0; /* - * Pages instantiated by device-dax (not filesystem-dax) - * may be compound pages. - */ - page = compound_head(page); - - /* * Prevent the inode from being freed while we are interrogating * the address_space, typically this would be handled by * lock_page(), but dax pages do not use the page lock. This * also prevents changes to the mapping of this pfn until * poison signaling is complete. */ - cookie = dax_lock_page(page); + cookie = dax_lock_folio(folio); if (!cookie) return -EBUSY; - if (hwpoison_filter(page)) { + if (hwpoison_filter(&folio->page)) { rc = -EOPNOTSUPP; goto unlock; } @@ -1756,7 +1752,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, * Use this flag as an indication that the dax page has been * remapped UC to prevent speculative consumption of poison. */ - SetPageHWPoison(page); + SetPageHWPoison(&folio->page); /* * Unlike System-RAM there is no possibility to swap in a @@ -1765,11 +1761,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags, * SIGBUS (i.e. MF_MUST_KILL) */ flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; - collect_procs(page, &to_kill, true); + collect_procs(folio, &folio->page, &to_kill, true); - unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); + unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); unlock: - dax_unlock_page(page, cookie); + dax_unlock_folio(folio, cookie); return rc; } @@ -1788,6 +1784,7 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, dax_entry_t cookie; struct page *page; size_t end = index + count; + bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE; mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; @@ -1799,9 +1796,14 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, if (!page) goto unlock; - SetPageHWPoison(page); + if (!pre_remove) + SetPageHWPoison(page); - collect_procs_fsdax(page, mapping, index, &to_kill); + /* + * The pre_remove case is revoking access, the memory is still + * good and could theoretically be put back into service. 
+ */ + collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove); unmap_and_kill(&to_kill, page_to_pfn(page), mapping, index, mf_flags); unlock: @@ -1813,6 +1815,7 @@ EXPORT_SYMBOL_GPL(mf_dax_kill_procs); #endif /* CONFIG_FS_DAX */ #ifdef CONFIG_HUGETLB_PAGE + /* * Struct raw_hwp_page represents information about "raw error page", * constructing singly linked list from ->_hugetlb_hwpoison field of folio. @@ -1827,16 +1830,49 @@ static inline struct llist_head *raw_hwp_list_head(struct folio *folio) return (struct llist_head *)&folio->_hugetlb_hwpoison; } +bool is_raw_hwpoison_page_in_hugepage(struct page *page) +{ + struct llist_head *raw_hwp_head; + struct raw_hwp_page *p; + struct folio *folio = page_folio(page); + bool ret = false; + + if (!folio_test_hwpoison(folio)) + return false; + + if (!folio_test_hugetlb(folio)) + return PageHWPoison(page); + + /* + * When RawHwpUnreliable is set, kernel lost track of which subpages + * are HWPOISON. So return as if ALL subpages are HWPOISONed. + */ + if (folio_test_hugetlb_raw_hwp_unreliable(folio)) + return true; + + mutex_lock(&mf_mutex); + + raw_hwp_head = raw_hwp_list_head(folio); + llist_for_each_entry(p, raw_hwp_head->first, node) { + if (page == p->page) { + ret = true; + break; + } + } + + mutex_unlock(&mf_mutex); + + return ret; +} + static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) { - struct llist_head *head; - struct llist_node *t, *tnode; + struct llist_node *head; + struct raw_hwp_page *p, *next; unsigned long count = 0; - head = raw_hwp_list_head(folio); - llist_for_each_safe(tnode, t, head->first) { - struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); - + head = llist_del_all(raw_hwp_list_head(folio)); + llist_for_each_entry_safe(p, next, head, node) { if (move_flag) SetPageHWPoison(p->page); else @@ -1844,7 +1880,6 @@ static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) kfree(p); count++; } - llist_del_all(head); return count; } @@ -1852,7 +1887,7 @@ static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) { struct llist_head *head; struct raw_hwp_page *raw_hwp; - struct llist_node *t, *tnode; + struct raw_hwp_page *p; int ret = folio_test_set_hwpoison(folio) ? 
-EHWPOISON : 0; /* @@ -1863,9 +1898,7 @@ static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return -EHWPOISON; head = raw_hwp_list_head(folio); - llist_for_each_safe(tnode, t, head->first) { - struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); - + llist_for_each_entry(p, head->first, node) { if (p->page == page) return -EHWPOISON; } @@ -1916,6 +1949,8 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio) { if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return; + if (folio_test_hugetlb_vmemmap_optimized(folio)) + return; folio_clear_hwpoison(folio); folio_free_raw_hwp(folio, true); } @@ -1998,18 +2033,18 @@ retry: *hugetlb = 0; return 0; } else if (res == -EHWPOISON) { - pr_err("%#lx: already hardware poisoned\n", pfn); if (flags & MF_ACTION_REQUIRED) { folio = page_folio(p); res = kill_accessing_process(current, folio_pfn(folio), flags); } + action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); return res; } else if (res == -EBUSY) { if (!(flags & MF_NO_RETRY)) { flags |= MF_NO_RETRY; goto retry; } - return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); + return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); } folio = page_folio(p); @@ -2031,7 +2066,7 @@ retry: */ if (res == 0) { folio_unlock(folio); - if (__page_handle_poison(p) >= 0) { + if (__page_handle_poison(p) > 0) { page_ref_inc(p); res = MF_RECOVERED; } else { @@ -2040,11 +2075,11 @@ retry: return action_result(pfn, MF_MSG_FREE_HUGE, res); } - page_flags = folio->flags; + page_flags = folio->flags.f; - if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) { + if (!hwpoison_user_mappings(folio, p, pfn, flags)) { folio_unlock(folio); - return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); + return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED); } return identify_page_state(pfn, p, page_flags); @@ -2065,14 +2100,10 @@ static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag) /* Drop the extra refcount in case we come from madvise() */ static void put_ref_page(unsigned long pfn, int flags) { - struct page *page; - if (!(flags & MF_COUNT_INCREASED)) return; - page = pfn_to_page(pfn); - if (page) - put_page(page); + put_page(pfn_to_page(pfn)); } static int memory_failure_dev_pagemap(unsigned long pfn, int flags, @@ -2080,8 +2111,6 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, { int rc = -ENXIO; - put_ref_page(pfn, flags); - /* device metadata space is not recoverable */ if (!pgmap_pfn_valid(pgmap, pfn)) goto out; @@ -2104,11 +2133,158 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, out: /* drop pgmap ref acquired in caller */ put_dev_pagemap(pgmap); - action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED); + if (rc != -EOPNOTSUPP) + action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED); return rc; } -static DEFINE_MUTEX(mf_mutex); +/* + * The calling condition is as such: thp split failed, page might have + * been RDMA pinned, not much can be done for recovery. + * But a SIGBUS should be delivered with vaddr provided so that the user + * application has a chance to recover. Also, application processes' + * election for MCE early killed will be honored. 
+ */ +static void kill_procs_now(struct page *p, unsigned long pfn, int flags, + struct folio *folio) +{ + LIST_HEAD(tokill); + + folio_lock(folio); + collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); + folio_unlock(folio); + + kill_procs(&tokill, true, pfn, flags); +} + +int register_pfn_address_space(struct pfn_address_space *pfn_space) +{ + guard(mutex)(&pfn_space_lock); + + if (interval_tree_iter_first(&pfn_space_itree, + pfn_space->node.start, + pfn_space->node.last)) + return -EBUSY; + + interval_tree_insert(&pfn_space->node, &pfn_space_itree); + + return 0; +} +EXPORT_SYMBOL_GPL(register_pfn_address_space); + +void unregister_pfn_address_space(struct pfn_address_space *pfn_space) +{ + guard(mutex)(&pfn_space_lock); + + if (interval_tree_iter_first(&pfn_space_itree, + pfn_space->node.start, + pfn_space->node.last)) + interval_tree_remove(&pfn_space->node, &pfn_space_itree); +} +EXPORT_SYMBOL_GPL(unregister_pfn_address_space); + +static void add_to_kill_pfn(struct task_struct *tsk, + struct vm_area_struct *vma, + struct list_head *to_kill, + unsigned long pfn) +{ + struct to_kill *tk; + + tk = kmalloc(sizeof(*tk), GFP_ATOMIC); + if (!tk) { + pr_info("Unable to kill proc %d\n", tsk->pid); + return; + } + + /* Check for pgoff not backed by struct page */ + tk->addr = vma_address(vma, pfn, 1); + tk->size_shift = PAGE_SHIFT; + + if (tk->addr == -EFAULT) + pr_info("Unable to find address %lx in %s\n", + pfn, tsk->comm); + + get_task_struct(tsk); + tk->tsk = tsk; + list_add_tail(&tk->nd, to_kill); +} + +/* + * Collect processes when the error hit a PFN not backed by struct page. + */ +static void collect_procs_pfn(struct address_space *mapping, + unsigned long pfn, struct list_head *to_kill) +{ + struct vm_area_struct *vma; + struct task_struct *tsk; + + i_mmap_lock_read(mapping); + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *t = tsk; + + t = task_early_kill(tsk, true); + if (!t) + continue; + vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) { + if (vma->vm_mm == t->mm) + add_to_kill_pfn(t, vma, to_kill, pfn); + } + } + rcu_read_unlock(); + i_mmap_unlock_read(mapping); +} + +/** + * memory_failure_pfn - Handle memory failure on a page not backed by + * struct page. + * @pfn: Page Number of the corrupted page + * @flags: fine tune action taken + * + * Return: + * 0 - success, + * -EBUSY - Page PFN does not belong to any address space mapping. + */ +static int memory_failure_pfn(unsigned long pfn, int flags) +{ + struct interval_tree_node *node; + LIST_HEAD(tokill); + + scoped_guard(mutex, &pfn_space_lock) { + bool mf_handled = false; + + /* + * Modules registers with MM the address space mapping to + * the device memory they manage. Iterate to identify + * exactly which address space has mapped to this failing + * PFN. + */ + for (node = interval_tree_iter_first(&pfn_space_itree, pfn, pfn); node; + node = interval_tree_iter_next(node, pfn, pfn)) { + struct pfn_address_space *pfn_space = + container_of(node, struct pfn_address_space, node); + + collect_procs_pfn(pfn_space->mapping, pfn, &tokill); + + mf_handled = true; + } + + if (!mf_handled) + return action_result(pfn, MF_MSG_PFN_MAP, MF_IGNORED); + } + + /* + * Unlike System-RAM there is no possibility to swap in a different + * physical page at a given virtual address, so all userspace + * consumption of direct PFN memory necessitates SIGBUS (i.e. 
+ * MF_MUST_KILL) + */ + flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; + + kill_procs(&tokill, true, pfn, flags); + + return action_result(pfn, MF_MSG_PFN_MAP, MF_RECOVERED); +} /** * memory_failure - Handle memory failure of a page. @@ -2125,16 +2301,20 @@ static DEFINE_MUTEX(mf_mutex); * detected by a background scrubber) * * Must run in process context (e.g. a work queue) with interrupts - * enabled and no spinlocks hold. + * enabled and no spinlocks held. * - * Return: 0 for successfully handled the memory error, - * -EOPNOTSUPP for hwpoison_filter() filtered the error event, - * < 0(except -EOPNOTSUPP) on failure. + * Return: + * 0 - success, + * -ENXIO - memory not managed by the kernel + * -EOPNOTSUPP - hwpoison_filter() filtered the error event, + * -EHWPOISON - the page was already poisoned, potentially + * kill process, + * other negative values - failure. */ int memory_failure(unsigned long pfn, int flags) { struct page *p; - struct page *hpage; + struct folio *folio; struct dev_pagemap *pgmap; int res = 0; unsigned long page_flags; @@ -2155,8 +2335,17 @@ int memory_failure(unsigned long pfn, int flags) if (res == 0) goto unlock_mutex; + if (!pfn_valid(pfn) && !arch_is_platform_page(PFN_PHYS(pfn))) { + /* + * The PFN is not backed by struct page. + */ + res = memory_failure_pfn(pfn, flags); + goto unlock_mutex; + } + if (pfn_valid(pfn)) { - pgmap = get_dev_pagemap(pfn, NULL); + pgmap = get_dev_pagemap(pfn); + put_ref_page(pfn, flags); if (pgmap) { res = memory_failure_dev_pagemap(pfn, flags, pgmap); @@ -2174,17 +2363,15 @@ try_again: goto unlock_mutex; if (TestSetPageHWPoison(p)) { - pr_err("%#lx: already hardware poisoned\n", pfn); res = -EHWPOISON; if (flags & MF_ACTION_REQUIRED) res = kill_accessing_process(current, pfn, flags); if (flags & MF_COUNT_INCREASED) put_page(p); + action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); goto unlock_mutex; } - hpage = compound_head(p); - /* * We need/can do nothing about count=0 pages. * 1) it's a free page, and therefore in safe hand: @@ -2218,18 +2405,34 @@ try_again: } goto unlock_mutex; } else if (res < 0) { - res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); + res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); goto unlock_mutex; } } - if (PageTransHuge(hpage)) { + folio = page_folio(p); + + /* filter pages that are protected from hwpoison test by users */ + folio_lock(folio); + if (hwpoison_filter(p)) { + ClearPageHWPoison(p); + folio_unlock(folio); + folio_put(folio); + res = -EOPNOTSUPP; + goto unlock_mutex; + } + folio_unlock(folio); + + if (folio_test_large(folio)) { + const int new_order = min_order_for_split(folio); + int err; + /* * The flag must be set after the refcount is bumped * otherwise it may race with THP split. * And the flag can't be set in get_hwpoison_page() since * it is called by soft offline too and it is just called - * for !MF_COUNT_INCREASE. So here seems to be the best + * for !MF_COUNT_INCREASED. So here seems to be the best * place. * * Don't need care about the above error handling paths for @@ -2237,12 +2440,25 @@ try_again: * or unhandlable page. The refcount is bumped iff the * page is a valid handlable page. */ - SetPageHasHWPoisoned(hpage); - if (try_to_split_thp_page(p) < 0) { - res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED); + folio_set_has_hwpoisoned(folio); + err = try_to_split_thp_page(p, new_order, /* release= */ false); + /* + * If splitting a folio to order-0 fails, kill the process. + * Split the folio regardless to minimize unusable pages. 
+ * Because the memory failure code cannot handle large + * folios, this split is always treated as if it failed. + */ + if (err || new_order) { + /* get folio again in case the original one is split */ + folio = page_folio(p); + res = -EHWPOISON; + kill_procs_now(p, pfn, flags, folio); + put_page(p); + action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED); goto unlock_mutex; } VM_BUG_ON_PAGE(!page_count(p), p); + folio = page_folio(p); } /* @@ -2253,73 +2469,54 @@ try_again: * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ - shake_page(p); + shake_folio(folio); - lock_page(p); + folio_lock(folio); /* * We're only intended to deal with the non-Compound page here. - * However, the page could have changed compound pages due to - * race window. If this happens, we could try again to hopefully - * handle the page next round. + * The page cannot become compound pages again as folio has been + * splited and extra refcnt is held. */ - if (PageCompound(p)) { - if (retry) { - ClearPageHWPoison(p); - unlock_page(p); - put_page(p); - flags &= ~MF_COUNT_INCREASED; - retry = false; - goto try_again; - } - res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED); - goto unlock_page; - } + WARN_ON(folio_test_large(folio)); /* * We use page flags to determine what action should be taken, but * the flags can be modified by the error containment action. One * example is an mlocked page, where PG_mlocked is cleared by - * page_remove_rmap() in try_to_unmap_one(). So to determine page status - * correctly, we save a copy of the page flags at this time. + * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page + * status correctly, we save a copy of the page flags at this time. */ - page_flags = p->flags; - - if (hwpoison_filter(p)) { - ClearPageHWPoison(p); - unlock_page(p); - put_page(p); - res = -EOPNOTSUPP; - goto unlock_mutex; - } + page_flags = folio->flags.f; /* - * __munlock_folio() may clear a writeback page's LRU flag without - * page_lock. We need wait writeback completion for this page or it - * may trigger vfs BUG while evict inode. + * __munlock_folio() may clear a writeback folio's LRU flag without + * the folio lock. We need to wait for writeback completion for this + * folio or it may trigger a vfs BUG while evicting inode. */ - if (!PageLRU(p) && !PageWriteback(p)) + if (!folio_test_lru(folio) && !folio_test_writeback(folio)) goto identify_page_state; /* * It's very difficult to mess with pages currently under IO * and in many cases impossible, so we just avoid it here. */ - wait_on_page_writeback(p); + folio_wait_writeback(folio); /* * Now take care of user space mappings. * Abort on fail: __filemap_remove_folio() assumes unmapped page. */ - if (!hwpoison_user_mappings(p, pfn, flags, p)) { - res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); + if (!hwpoison_user_mappings(folio, p, pfn, flags)) { + res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED); goto unlock_page; } /* * Torn down by someone else? 
*/ - if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { + if (folio_test_lru(folio) && !folio_test_swapcache(folio) && + folio->mapping == NULL) { res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED); goto unlock_page; } @@ -2329,7 +2526,7 @@ identify_page_state: mutex_unlock(&mf_mutex); return res; unlock_page: - unlock_page(p); + folio_unlock(folio); unlock_mutex: mutex_unlock(&mf_mutex); return res; @@ -2347,7 +2544,7 @@ struct memory_failure_entry { struct memory_failure_cpu { DECLARE_KFIFO(fifo, struct memory_failure_entry, MEMORY_FAILURE_FIFO_SIZE); - spinlock_t lock; + raw_spinlock_t lock; struct work_struct work; }; @@ -2373,20 +2570,22 @@ void memory_failure_queue(unsigned long pfn, int flags) { struct memory_failure_cpu *mf_cpu; unsigned long proc_flags; + bool buffer_overflow; struct memory_failure_entry entry = { .pfn = pfn, .flags = flags, }; mf_cpu = &get_cpu_var(memory_failure_cpu); - spin_lock_irqsave(&mf_cpu->lock, proc_flags); - if (kfifo_put(&mf_cpu->fifo, entry)) + raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags); + buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry); + if (!buffer_overflow) schedule_work_on(smp_processor_id(), &mf_cpu->work); - else + raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); + put_cpu_var(memory_failure_cpu); + if (buffer_overflow) pr_err("buffer overflow when queuing memory failure at %#lx\n", pfn); - spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); - put_cpu_var(memory_failure_cpu); } EXPORT_SYMBOL_GPL(memory_failure_queue); @@ -2399,9 +2598,9 @@ static void memory_failure_work_func(struct work_struct *work) mf_cpu = container_of(work, struct memory_failure_cpu, work); for (;;) { - spin_lock_irqsave(&mf_cpu->lock, proc_flags); + raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags); gotten = kfifo_get(&mf_cpu->fifo, &entry); - spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); + raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); if (!gotten) break; if (entry.flags & MF_SOFT_OFFLINE) @@ -2411,19 +2610,6 @@ static void memory_failure_work_func(struct work_struct *work) } } -/* - * Process memory_failure work queued on the specified CPU. - * Used to avoid return-to-userspace racing with the memory_failure workqueue. 
- */ -void memory_failure_queue_kick(int cpu) -{ - struct memory_failure_cpu *mf_cpu; - - mf_cpu = &per_cpu(memory_failure_cpu, cpu); - cancel_work_sync(&mf_cpu->work); - memory_failure_work_func(&mf_cpu->work); -} - static int __init memory_failure_init(void) { struct memory_failure_cpu *mf_cpu; @@ -2431,7 +2617,7 @@ static int __init memory_failure_init(void) for_each_possible_cpu(cpu) { mf_cpu = &per_cpu(memory_failure_cpu, cpu); - spin_lock_init(&mf_cpu->lock); + raw_spin_lock_init(&mf_cpu->lock); INIT_KFIFO(mf_cpu->fifo); INIT_WORK(&mf_cpu->work, memory_failure_work_func); } @@ -2443,7 +2629,7 @@ static int __init memory_failure_init(void) core_initcall(memory_failure_init); #undef pr_fmt -#define pr_fmt(fmt) "" fmt +#define pr_fmt(fmt) "Unpoison: " fmt #define unpoison_pr_info(fmt, pfn, rs) \ ({ \ if (__ratelimit(rs)) \ @@ -2466,77 +2652,83 @@ int unpoison_memory(unsigned long pfn) { struct folio *folio; struct page *p; - int ret = -EBUSY; - unsigned long count = 1; + int ret = -EBUSY, ghp; + unsigned long count; bool huge = false; static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); - if (!pfn_valid(pfn)) - return -ENXIO; - - p = pfn_to_page(pfn); + p = pfn_to_online_page(pfn); + if (!p) + return -EIO; folio = page_folio(p); mutex_lock(&mf_mutex); if (hw_memory_failure) { - unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n", + unpoison_pr_info("%#lx: disabled after HW memory failure\n", + pfn, &unpoison_rs); + ret = -EOPNOTSUPP; + goto unlock_mutex; + } + + if (is_huge_zero_folio(folio)) { + unpoison_pr_info("%#lx: huge zero page is not supported\n", pfn, &unpoison_rs); ret = -EOPNOTSUPP; goto unlock_mutex; } if (!PageHWPoison(p)) { - unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", + unpoison_pr_info("%#lx: page was already unpoisoned\n", pfn, &unpoison_rs); goto unlock_mutex; } if (folio_ref_count(folio) > 1) { - unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n", + unpoison_pr_info("%#lx: someone grabs the hwpoison page\n", pfn, &unpoison_rs); goto unlock_mutex; } + if (folio_test_slab(folio) || folio_test_pgtable(folio) || + folio_test_reserved(folio) || folio_test_offline(folio)) + goto unlock_mutex; + if (folio_mapped(folio)) { - unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n", + unpoison_pr_info("%#lx: someone maps the hwpoison page\n", pfn, &unpoison_rs); goto unlock_mutex; } if (folio_mapping(folio)) { - unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n", + unpoison_pr_info("%#lx: the hwpoison page has non-NULL mapping\n", pfn, &unpoison_rs); goto unlock_mutex; } - if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio)) - goto unlock_mutex; - - ret = get_hwpoison_page(p, MF_UNPOISON); - if (!ret) { - if (PageHuge(p)) { + ghp = get_hwpoison_page(p, MF_UNPOISON); + if (!ghp) { + if (folio_test_hugetlb(folio)) { huge = true; count = folio_free_raw_hwp(folio, false); - if (count == 0) { - ret = -EBUSY; + if (count == 0) goto unlock_mutex; - } } ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY; - } else if (ret < 0) { - if (ret == -EHWPOISON) { + } else if (ghp < 0) { + if (ghp == -EHWPOISON) { ret = put_page_back_buddy(p) ? 
0 : -EBUSY; - } else - unpoison_pr_info("Unpoison: failed to grab page %#lx\n", + } else { + ret = ghp; + unpoison_pr_info("%#lx: failed to grab page\n", pfn, &unpoison_rs); + } } else { - if (PageHuge(p)) { + if (folio_test_hugetlb(folio)) { huge = true; count = folio_free_raw_hwp(folio, false); if (count == 0) { - ret = -EBUSY; folio_put(folio); goto unlock_mutex; } @@ -2554,46 +2746,15 @@ unlock_mutex: if (!ret) { if (!huge) num_poisoned_pages_sub(pfn, 1); - unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", + unpoison_pr_info("%#lx: software-unpoisoned page\n", page_to_pfn(p), &unpoison_rs); } return ret; } EXPORT_SYMBOL(unpoison_memory); -static bool isolate_page(struct page *page, struct list_head *pagelist) -{ - bool isolated = false; - - if (PageHuge(page)) { - isolated = isolate_hugetlb(page_folio(page), pagelist); - } else { - bool lru = !__PageMovable(page); - - if (lru) - isolated = isolate_lru_page(page); - else - isolated = isolate_movable_page(page, - ISOLATE_UNEVICTABLE); - - if (isolated) { - list_add(&page->lru, pagelist); - if (lru) - inc_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_lru(page)); - } - } - - /* - * If we succeed to isolate the page, we grabbed another refcount on - * the page, so we can safely drop the one we got from get_any_pages(). - * If we failed to isolate the page, it means that we cannot go further - * and we will return an error, so drop the reference we got from - * get_any_pages() as well. - */ - put_page(page); - return isolated; -} +#undef pr_fmt +#define pr_fmt(fmt) "Soft offline: " fmt /* * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages. @@ -2604,48 +2765,71 @@ static int soft_offline_in_use_page(struct page *page) { long ret = 0; unsigned long pfn = page_to_pfn(page); - struct page *hpage = compound_head(page); + struct folio *folio = page_folio(page); char const *msg_page[] = {"page", "hugepage"}; - bool huge = PageHuge(page); + bool huge = folio_test_hugetlb(folio); + bool isolated; LIST_HEAD(pagelist); struct migration_target_control mtc = { .nid = NUMA_NO_NODE, .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + .reason = MR_MEMORY_FAILURE, }; - if (!huge && PageTransHuge(hpage)) { - if (try_to_split_thp_page(page)) { - pr_info("soft offline: %#lx: thp split failed\n", pfn); + if (!huge && folio_test_large(folio)) { + const int new_order = min_order_for_split(folio); + + /* + * If new_order (target split order) is not 0, do not split the + * folio at all to retain the still accessible large folio. + * NOTE: if minimizing the number of soft offline pages is + * preferred, split it to non-zero new_order like it is done in + * memory_failure(). + */ + if (new_order || try_to_split_thp_page(page, /* new_order= */ 0, + /* release= */ true)) { + pr_info("%#lx: thp split failed\n", pfn); return -EBUSY; } - hpage = page; + folio = page_folio(page); } - lock_page(page); - if (!PageHuge(page)) - wait_on_page_writeback(page); + folio_lock(folio); + if (!huge) + folio_wait_writeback(folio); if (PageHWPoison(page)) { - unlock_page(page); - put_page(page); - pr_info("soft offline: %#lx page already poisoned\n", pfn); + folio_unlock(folio); + folio_put(folio); + pr_info("%#lx: page already poisoned\n", pfn); return 0; } - if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page)) + if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio)) /* * Try to invalidate first. This should work for * non dirty unmapped page cache pages. 
*/ - ret = invalidate_inode_page(page); - unlock_page(page); + ret = mapping_evict_folio(folio_mapping(folio), folio); + folio_unlock(folio); if (ret) { - pr_info("soft_offline: %#lx: invalidated\n", pfn); + pr_info("%#lx: invalidated\n", pfn); page_handle_poison(page, false, true); return 0; } - if (isolate_page(hpage, &pagelist)) { + isolated = isolate_folio_to_list(folio, &pagelist); + + /* + * If we succeed to isolate the folio, we grabbed another refcount on + * the folio, so we can safely drop the one we got from get_any_page(). + * If we failed to isolate the folio, it means that we cannot go further + * and we will return an error, so drop the reference we got from + * get_any_page() as well. + */ + folio_put(folio); + + if (isolated) { ret = migrate_pages(&pagelist, alloc_migration_target, NULL, (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL); if (!ret) { @@ -2657,14 +2841,14 @@ static int soft_offline_in_use_page(struct page *page) if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); - pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n", - pfn, msg_page[huge], ret, &page->flags); + pr_info("%#lx: %s migration failed %ld, type %pGp\n", + pfn, msg_page[huge], ret, &page->flags.f); if (ret > 0) ret = -EBUSY; } } else { - pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", - pfn, msg_page[huge], page_count(page), &page->flags); + pr_info("%#lx: %s isolation failed, page count %d, type %pGp\n", + pfn, msg_page[huge], page_count(page), &page->flags.f); ret = -EBUSY; } return ret; @@ -2675,8 +2859,9 @@ static int soft_offline_in_use_page(struct page *page) * @pfn: pfn to soft-offline * @flags: flags. Same as memory_failure(). * - * Returns 0 on success - * -EOPNOTSUPP for hwpoison_filter() filtered the error event + * Returns 0 on success, + * -EOPNOTSUPP for hwpoison_filter() filtered the error event, or + * disabled by /proc/sys/vm/enable_soft_offline, * < 0 otherwise negated errno. * * Soft offline a page, by migration or invalidation, @@ -2712,10 +2897,16 @@ int soft_offline_page(unsigned long pfn, int flags) return -EIO; } + if (!sysctl_enable_soft_offline) { + pr_info_once("disabled by /proc/sys/vm/enable_soft_offline\n"); + put_ref_page(pfn, flags); + return -EOPNOTSUPP; + } + mutex_lock(&mf_mutex); if (PageHWPoison(page)) { - pr_info("%s: %#lx page already poisoned\n", __func__, pfn); + pr_info("%#lx: page already poisoned\n", pfn); put_ref_page(pfn, flags); mutex_unlock(&mf_mutex); return 0; @@ -2737,10 +2928,13 @@ retry: if (ret > 0) { ret = soft_offline_in_use_page(page); } else if (ret == 0) { - if (!page_handle_poison(page, true, false) && try_again) { - try_again = false; - flags &= ~MF_COUNT_INCREASED; - goto retry; + if (!page_handle_poison(page, true, false)) { + if (try_again) { + try_again = false; + flags &= ~MF_COUNT_INCREASED; + goto retry; + } + ret = -EBUSY; } } |
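
As the updated kernel-doc above notes, memory_failure() must run in process context with interrupts enabled and no spinlocks held; reporters running in interrupt or machine-check context are expected to defer through memory_failure_queue(), whose per-CPU kfifo is now protected by a raw spinlock. A minimal caller-side sketch under stated assumptions (the mydrv_*() helpers and their calling contexts are illustrative, not part of the patch; both entry points are the exported functions shown in the hunks):

#include <linux/mm.h>

/* From interrupt or error-notification context: defer the handling. */
static void mydrv_report_poison_from_irq(unsigned long pfn)
{
	/*
	 * Puts the PFN on the per-CPU kfifo under the raw spinlock and
	 * schedules memory_failure_work_func() on this CPU; if the fifo
	 * is full the overflow is only logged.
	 */
	memory_failure_queue(pfn, 0);
}

/* From process context (e.g. a workqueue): handle it synchronously. */
static int mydrv_report_poison(unsigned long pfn)
{
	/*
	 * Per the kernel-doc above: 0 on success, -EHWPOISON if the page
	 * was already poisoned, -EOPNOTSUPP if the event was filtered,
	 * other negative values on failure.
	 */
	return memory_failure(pfn, 0);
}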

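The register_pfn_address_space() / unregister_pfn_address_space() pair added above lets a driver that maps device memory with no struct page backing opt into memory-failure handling: memory_failure_pfn() walks the registered interval tree, collects every task mapping the failing PFN through the registered address_space, and delivers SIGBUS with MF_ACTION_REQUIRED | MF_MUST_KILL. A minimal driver-side sketch under stated assumptions (struct mydev, the mydev_*() helpers and the header location are illustrative; only the pfn_address_space fields and return values are taken from the hunks above):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/memory-failure.h>	/* assumed location of the declarations */

struct mydev {
	struct pfn_address_space pfn_space;	/* one per managed PFN range */
	struct address_space *mapping;		/* e.g. the device file's f_mapping */
	unsigned long base_pfn;
	unsigned long nr_pfns;
};

static int mydev_enable_memory_failure(struct mydev *dev)
{
	/*
	 * The interval is inclusive and in PFN units.  collect_procs_pfn()
	 * later looks up VMAs in mapping->i_mmap keyed by the PFN itself,
	 * so this assumes the driver's mmap() indexes the address_space by
	 * PFN (vm_pgoff equal to the mapped PFN).
	 */
	dev->pfn_space.node.start = dev->base_pfn;
	dev->pfn_space.node.last  = dev->base_pfn + dev->nr_pfns - 1;
	dev->pfn_space.mapping    = dev->mapping;

	/* Returns -EBUSY if the range overlaps an already registered space. */
	return register_pfn_address_space(&dev->pfn_space);
}

static void mydev_disable_memory_failure(struct mydev *dev)
{
	unregister_pfn_address_space(&dev->pfn_space);
}

With such a registration in place, a failure on one of these PFNs is reported as MF_MSG_PFN_MAP and MF_RECOVERED after the affected tasks are killed; if no registered space covers the PFN, memory_failure_pfn() reports MF_MSG_PFN_MAP as MF_IGNORED instead.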