author		Kirill Tkhai <ktkhai@virtuozzo.com>	2018-03-19 18:32:10 +0300
committer	Tejun Heo <tj@kernel.org>	2018-03-19 09:38:50 -0700
commit		f52ba1fef7b92e74d58efef8eae7b6f48c6d218d (patch)
tree		be3bfd5e52ea11827506db4397dcd29e9dd6a83e /mm
parent		71546d100422bcc2c543dadeb9328728997cd23a (diff)
mm: Allow killing tasks doing pcpu_alloc() and waiting for pcpu_balance_workfn()
In case of memory deficit and low percpu memory pages, pcpu_balance_workfn() takes pcpu_alloc_mutex for a long time (as it makes memory allocations itself and waits for memory reclaim). If tasks doing pcpu_alloc() are chosen by the OOM killer, they can't exit, because they are waiting for the mutex.

The patch makes pcpu_alloc() care about the killing signal and use mutex_lock_killable() when that is allowed by the GFP flags. This guarantees that a task does not miss SIGKILL from the OOM killer.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	13
1 file changed, 11 insertions, 2 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 15a398c00791..9297098519a6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1373,8 +1373,17 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		return NULL;
 	}
 
-	if (!is_atomic)
-		mutex_lock(&pcpu_alloc_mutex);
+	if (!is_atomic) {
+		/*
+		 * pcpu_balance_workfn() allocates memory under this mutex,
+		 * and it may wait for memory reclaim. Allow current task
+		 * to become OOM victim, in case of memory pressure.
+		 */
+		if (gfp & __GFP_NOFAIL)
+			mutex_lock(&pcpu_alloc_mutex);
+		else if (mutex_lock_killable(&pcpu_alloc_mutex))
+			return NULL;
+	}
 
 	spin_lock_irqsave(&pcpu_lock, flags);
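
The locking policy in the hunk above can be illustrated outside the kernel. Below is a minimal userspace C sketch, not part of the patch; the names FLAG_NOFAIL, lock_alloc_mutex() and fatal_signal_pending_flag are hypothetical, and pthreads stands in for the kernel mutex API. It approximates what pcpu_alloc() now does: __GFP_NOFAIL-style callers block unconditionally, while every other caller abandons the wait once a fatal signal is pending, which is the semantics mutex_lock_killable() provides.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

#define FLAG_NOFAIL 0x1u	/* stand-in for __GFP_NOFAIL */

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile sig_atomic_t fatal_signal_pending_flag;	/* set by a signal handler */

static void on_fatal_signal(int sig)
{
	(void)sig;
	fatal_signal_pending_flag = 1;
}

/*
 * Locking policy mirroring the patch: callers that must not fail block
 * unconditionally; all other callers stop waiting once a fatal signal
 * is pending, approximating mutex_lock_killable().
 */
static int lock_alloc_mutex(unsigned int flags)
{
	if (flags & FLAG_NOFAIL) {
		pthread_mutex_lock(&alloc_mutex);
		return 0;
	}

	for (;;) {
		if (pthread_mutex_trylock(&alloc_mutex) == 0)
			return 0;
		if (fatal_signal_pending_flag)
			return -1;	/* give up; caller returns NULL and can exit */

		/* brief back-off before retrying the lock */
		struct timespec ts = { 0, 1000000L };
		nanosleep(&ts, NULL);
	}
}

int main(void)
{
	signal(SIGINT, on_fatal_signal);

	if (lock_alloc_mutex(0) == 0) {
		printf("got the allocator mutex\n");
		pthread_mutex_unlock(&alloc_mutex);
	} else {
		printf("fatal signal pending, bailing out\n");
	}
	return 0;
}

The trylock-plus-poll loop is only an approximation: in the kernel, mutex_lock_killable() sleeps on the mutex and is woken directly when a fatal signal arrives, so the OOM-killed task returns -EINTR from the lock attempt and can exit without polling.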