author	Vasily Averin <vvs@virtuozzo.com>	2021-06-30 18:57:09 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-07-01 11:06:07 -0700
commit	fc37a3b8b4388e73e8e3525556d9f1feeb232bb9 (patch)
tree	4a1adfb461437e5dff7626b725c7f77ab11dbcae /ipc/sem.c
parent	3b52348345b2cfe038d317de52bcdef788c6520d (diff)
ipc sem: use kvmalloc for sem_undo allocation
Patch series "ipc: allocations cleanup", v2.

Some ipc objects use the wrong allocation functions: small objects can safely use kmalloc(), and, conversely, potentially large objects should use kvmalloc() rather than kmalloc().

This patch (of 2):

The size of sem_undo can exceed one page; with the maximum possible nsems = 32000 it can grow up to 64 KB. Switch its allocation to kvmalloc to avoid user-triggered disruptive actions such as the OOM killer in case of high-order memory shortage.

User-triggerable high-order allocations are quite a problem on heavily fragmented systems, and they can be a DoS vector.

Link: https://lkml.kernel.org/r/ebc3ac79-3190-520d-81ce-22ad194986ec@virtuozzo.com
Link: https://lkml.kernel.org/r/a6354fd9-2d55-2e63-dd4d-fa7dc1d11134@virtuozzo.com
Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
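For scale, a rough sketch of the arithmetic behind the change (illustrative only, not kernel source; demo_undo, demo_alloc_undo and demo_free_undo below are made-up names): with the default SEMMSL limit of 32000 semaphores per set, the per-semaphore undo array alone is sizeof(short) * 32000 = 64000 bytes (~62.5 KiB), i.e. an order-4, 16-page contiguous request for kmalloc(), whereas kvzalloc() tries kmalloc() first and falls back to vmalloc() when contiguous pages are scarce.

/* Illustrative sketch of the kvzalloc()/kvfree() pattern the patch adopts
 * for a potentially large, user-sized object; not code from the patch. */
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_undo {
	int semid;
	short semadj[];			/* one adjustment value per semaphore */
};

static struct demo_undo *demo_alloc_undo(int nsems)
{
	/* physically contiguous memory when cheap, vmalloc fallback otherwise */
	return kvzalloc(sizeof(struct demo_undo) + sizeof(short) * nsems,
			GFP_KERNEL);
}

static void demo_free_undo(struct demo_undo *un)
{
	kvfree(un);			/* correct for both allocation paths */
}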
Diffstat (limited to 'ipc/sem.c')
-rw-r--r--	ipc/sem.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index bf534c74293e..3a58188733d8 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1154,7 +1154,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
/* Wake up all pending processes and let them fail with EIDRM. */
@@ -1937,7 +1937,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
rcu_read_unlock();
/* step 2: allocate new undo structure */
- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
+ GFP_KERNEL);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
@@ -1949,7 +1950,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
- kfree(new);
+ kvfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
@@ -1960,7 +1961,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
- kfree(new);
+ kvfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
@@ -2420,7 +2421,7 @@ void exit_sem(struct task_struct *tsk)
rcu_read_unlock();
wake_up_q(&wake_q);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
kfree(ulp);
}
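For context on how such an allocation is user-triggerable, a minimal userspace sketch (my own illustration of the scenario described in the commit message, not test code from the patch): creating a set at the SEMMSL limit and issuing one semop() with SEM_UNDO makes the kernel build the per-process sem_undo record covering all 32000 semaphores, i.e. the ~64 KiB allocation that find_alloc_undo() now obtains with kvzalloc().

/* Illustration only: exercises the find_alloc_undo() path from userspace,
 * assuming the default SEMMSL limit of 32000 semaphores per set. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

int main(void)
{
	int nsems = 32000;	/* default SEMMSL limit */
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
	int semid = semget(IPC_PRIVATE, nsems, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		return 1;
	}
	/* SEM_UNDO makes the kernel allocate a sem_undo record sized for
	 * all 32000 semaphores (~64 KiB). */
	if (semop(semid, &op, 1) < 0)
		perror("semop");

	semctl(semid, 0, IPC_RMID);	/* remove the semaphore set */
	return 0;
}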