path: root/mm/memory-failure.c
author     Longlong Xia <xialonglong1@huawei.com>  2023-04-14 10:17:41 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2023-04-18 16:53:52 -0700
commit     4248d0083ec5817eebfb916c54950d100b3468ee (patch)
tree       b0f64bae03987593a20444657b0afd07471bf6b2 /mm/memory-failure.c
parent     4f775086a6eee07c6ae4be4734d736e13b537351 (diff)
mm: ksm: support hwpoison for ksm page
hwpoison_user_mappings() is updated to support ksm pages, and collect_procs_ksm() is added to collect the affected processes when the error hits a ksm page. The difference from collect_procs_anon() is that it also needs to traverse the rmap-item list on the stable node of the ksm page. add_to_kill_ksm() is added to handle ksm pages, and task_in_to_kill_list() is added to avoid adding the same task to the to_kill list twice: when scanning the rmap-item list, if all of the pages merged into the ksm page come from the same process, that process would otherwise be added repeatedly.

Link: https://lkml.kernel.org/r/20230414021741.2597273-3-xialonglong1@huawei.com
Signed-off-by: Longlong Xia <xialonglong1@huawei.com>
Tested-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
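Note: this page shows only the mm/memory-failure.c half of the change; collect_procs_ksm() itself lands in mm/ksm.c. A rough sketch of that side, reconstructed from the description above (the stable-node walk, locking order, and PAGE_MASK use follow the existing rmap_walk_ksm()/collect_procs_anon() conventions; check the full series for the exact code):

/*
 * Sketch of the mm/ksm.c side: walk every rmap_item hanging off the
 * stable node of the poisoned page, and for each VMA that maps the
 * shared page, queue the owning task.  add_to_kill_ksm() (added in
 * the diff below) dedups through task_in_to_kill_list().
 */
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early)
{
	struct folio *folio = page_folio(page);
	struct ksm_stable_node *stable_node = folio_stable_node(folio);
	struct ksm_rmap_item *rmap_item;
	struct task_struct *tsk;

	if (!stable_node)
		return;
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *av = rmap_item->anon_vma;

		anon_vma_lock_read(av);
		read_lock(&tasklist_lock);
		for_each_process(tsk) {
			struct anon_vma_chain *vmac;
			struct task_struct *t = task_early_kill(tsk, force_early);

			if (!t)
				continue;
			anon_vma_interval_tree_foreach(vmac, &av->rb_root,
						       0, ULONG_MAX) {
				if (vmac->vma->vm_mm == t->mm)
					add_to_kill_ksm(t, page, vmac->vma,
							to_kill,
							rmap_item->address & PAGE_MASK);
			}
		}
		read_unlock(&tasklist_lock);
		anon_vma_unlock_read(av);
	}
}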
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c  34
1 file changed, 25 insertions(+), 9 deletions(-)
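The diff below removes `static` from task_early_kill() and adds add_to_kill_ksm(); both are called from the new collect_procs_ksm() in mm/ksm.c, so the series also has to declare them in headers not shown on this page. Roughly, with the caveat that the exact header placement (include/linux/ksm.h vs. mm/internal.h) is an assumption:

/* Assumed declaration plus a !CONFIG_KSM stub so collect_procs()
 * always builds; this page only shows mm/memory-failure.c, so the
 * header placement is a guess.
 */
#ifdef CONFIG_KSM
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#else
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill,
				     int force_early)
{
}
#endif

/* Helpers exported from mm/memory-failure.c for the KSM side to call. */
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);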
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bb09999108ef..f69c410fb9f9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -455,6 +455,27 @@ static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
__add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
}
+#ifdef CONFIG_KSM
+static bool task_in_to_kill_list(struct list_head *to_kill,
+ struct task_struct *tsk)
+{
+ struct to_kill *tk, *next;
+
+ list_for_each_entry_safe(tk, next, to_kill, nd) {
+ if (tk->tsk == tsk)
+ return true;
+ }
+
+ return false;
+}
+void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+ struct vm_area_struct *vma, struct list_head *to_kill,
+ unsigned long ksm_addr)
+{
+ if (!task_in_to_kill_list(to_kill, tsk))
+ __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
+}
+#endif
/*
* Kill the processes that have been collected earlier.
*
@@ -534,8 +555,7 @@ static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
* processes sharing the same error page, if the process is "early kill", the
* task_struct of the dedicated thread will also be returned.
*/
-static struct task_struct *task_early_kill(struct task_struct *tsk,
- int force_early)
+struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
{
if (!tsk->mm)
return NULL;
@@ -666,8 +686,9 @@ static void collect_procs(struct page *page, struct list_head *tokill,
{
if (!page->mapping)
return;
-
- if (PageAnon(page))
+ if (unlikely(PageKsm(page)))
+ collect_procs_ksm(page, tokill, force_early);
+ else if (PageAnon(page))
collect_procs_anon(page, tokill, force_early);
else
collect_procs_file(page, tokill, force_early);
@@ -1522,11 +1543,6 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(hpage))
return true;
- if (PageKsm(p)) {
- pr_err("%#lx: can't handle KSM pages.\n", pfn);
- return false;
- }
-
if (PageSwapCache(p)) {
pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
ttu &= ~TTU_HWPOISON;
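With the PageKsm() bail-out removed, memory_failure() now unmaps a merged page and signals the tasks collected by collect_procs_ksm() instead of failing with "can't handle KSM pages". A minimal userspace smoke test for the new path might look like the sketch below (illustrative, not part of the patch: MADV_HWPOISON needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE, ksmd must be enabled via /sys/kernel/mm/ksm/run = 1, and the sleep()-based wait is a crude stand-in for polling /sys/kernel/mm/ksm/pages_shared):

/* Hypothetical test: merge two identical pages with KSM, then inject
 * a memory failure on one of them.  Before this patch the injection
 * failed with "can't handle KSM pages"; after it, touching the page
 * should be answered with SIGBUS.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, 2 * psize, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0x5a, 2 * psize);	/* identical contents: merge candidates */

	if (madvise(buf, 2 * psize, MADV_MERGEABLE))
		perror("MADV_MERGEABLE");
	sleep(5);			/* crude: give ksmd time to merge */

	if (madvise(buf, psize, MADV_HWPOISON))	/* inject the error */
		perror("MADV_HWPOISON");

	buf[0] = 0;			/* expect SIGBUS on the poisoned page */
	puts("page still accessible (injection failed?)");
	return 0;
}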