author    Michal Hocko <mhocko@suse.com>  2018-04-10 16:29:30 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-11 10:28:31 -0700
commit    2a70f6a76bb86d1f39a34b7764f6dcc4257b0356 (patch)
tree      b925efc4a6083d565eb06bf2289c6b4646e4b133
parent    07707125aec6a7529900616ba491210ec3d85fc6 (diff)
memcg, thp: do not invoke oom killer on thp charges
A THP memcg charge can trigger the oom killer since 2516035499b9 ("mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations"). We used an explicit __GFP_NORETRY previously, which ruled out the OOM killer automagically.

The memcg charge path should be semantically consistent with the allocation path: if we do not trigger the OOM killer for costly orders in the page allocator, we should not do so in the memcg charge path either. Otherwise we force callers to distinguish the two and use different gfp masks, which is both non-intuitive and bug prone. As soon as we get a costly high-order kmalloc user, we would not even have a way to pass a memcg-specific gfp mask to prevent the OOM, because the charging happens deep within the guts of the slab allocator.

The unexpected memcg OOM on THP has already been fixed upstream by 9d3c3354bb85 ("mm, thp: do not cause memcg oom for thp"), but that is a one-off fix rather than a generic solution.

Teach mem_cgroup_oom to bail out on costly order requests, which fixes the THP issue as well as any other costly, OOM-eligible allocations added in the future.

Also revert 9d3c3354bb85 because the special gfp for THP is no longer needed.

Link: http://lkml.kernel.org/r/20180403193129.22146-1-mhocko@kernel.org
Fixes: 2516035499b9 ("mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
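For context, the new check can be read as: any charge of order greater than PAGE_ALLOC_COSTLY_ORDER is simply not OOM-eligible. Below is a minimal standalone C sketch of that condition, not kernel code; the constants are assumptions mirroring common x86-64 values (PAGE_ALLOC_COSTLY_ORDER is 3 in include/linux/mmzone.h, and a PMD-sized THP is order 9, i.e. 2 MiB of 4 KiB pages), and memcg_oom_eligible() is a hypothetical helper used only for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Assumed values mirroring x86-64 defaults; not taken from this diff. */
#define PAGE_ALLOC_COSTLY_ORDER	3	/* include/linux/mmzone.h */
#define HPAGE_PMD_ORDER		9	/* 2 MiB THP on 4 KiB base pages */

/* Mirrors the early-return condition this patch adds to mem_cgroup_oom(). */
static bool memcg_oom_eligible(int order)
{
	return order <= PAGE_ALLOC_COSTLY_ORDER;
}

int main(void)
{
	/* Order-0 charges stay OOM-eligible; order-9 THP charges do not. */
	printf("order-0 charge may invoke OOM killer: %d\n",
	       memcg_oom_eligible(0));
	printf("order-9 THP charge may invoke OOM killer: %d\n",
	       memcg_oom_eligible(HPAGE_PMD_ORDER));
	return 0;
}

Since order 9 exceeds the costly threshold of 3, every PMD-sized THP charge now bails out of the memcg OOM path without callers having to pass __GFP_NORETRY.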
-rw-r--r--  mm/huge_memory.c  5
-rw-r--r--  mm/khugepaged.c   8
-rw-r--r--  mm/memcontrol.c   2
3 files changed, 5 insertions, 10 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f0ae8d1d4329..229ab8c75a6b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -555,8 +555,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
-				  true)) {
+	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1317,7 +1316,7 @@ alloc:
 	}
 
 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
-					huge_gfp | __GFP_NORETRY, &memcg, true))) {
+					huge_gfp, &memcg, true))) {
 		put_page(new_page);
 		split_huge_pmd(vma, vmf->pmd, vmf->address);
 		if (page)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e42568284e06..c15da1ea7e63 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -965,9 +965,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	/* Do not oom kill for khugepaged charges */
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
-					   &memcg, true))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -1326,9 +1324,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		goto out;
 	}
 
-	/* Do not oom kill for khugepaged charges */
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
-					   &memcg, true))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9ec024b862ac..6b4f5c0a8eef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1485,7 +1485,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-	if (!current->memcg_may_oom)
+	if (!current->memcg_may_oom || order > PAGE_ALLOC_COSTLY_ORDER)
 		return;
 	/*
 	 * We are in the middle of the charge context here, so we