Diffstat (limited to 'ipc/sem.c')
-rw-r--r--  ipc/sem.c  787
1 file changed, 529 insertions(+), 258 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index 38371e93bfa5..0f06e4bd4673 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/ipc/sem.c
* Copyright (C) 1992 Krishna Balasubramanian
@@ -35,7 +36,7 @@
* - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
* - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
- * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
* to /proc/sys/kernel/sem.
* - statistics about the usage are reported in /proc/sysvipc/sem.
*
@@ -69,6 +70,7 @@
* The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
@@ -83,17 +85,53 @@
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
+#include <linux/nospec.h>
+#include <linux/rhashtable.h>
#include <linux/uaccess.h>
#include "util.h"
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ int semval; /* current value */
+ /*
+ * PID of the process that last modified the semaphore. For
+ * Linux, specifically these are:
+ * - semop
+ * - semctl, via SETVAL and SETALL.
+ * - at task exit when performing undo adjustments (see exit_sem).
+ */
+ struct pid *sempid;
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
+ struct list_head pending_alter; /* pending single-sop operations */
+ /* that alter the semaphore */
+ struct list_head pending_const; /* pending single-sop operations */
+ /* that do not alter the semaphore*/
+ time64_t sem_otime; /* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
+
+/* One sem_array data structure for each set of semaphores in the system. */
+struct sem_array {
+ struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
+ time64_t sem_ctime; /* create/last semctl() time */
+ struct list_head pending_alter; /* pending operations */
+ /* that alter the array */
+ struct list_head pending_const; /* pending complex operations */
+ /* that do not alter semvals */
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
+ unsigned int use_global_lock;/* >0: global lock required */
+
+ struct sem sems[];
+} __randomize_layout;
/* One queue for each sleeping process in the system. */
struct sem_queue {
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
- int pid; /* process id of requesting process */
+ struct pid *pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
struct sembuf *blocking; /* the operation that blocked */
@@ -114,7 +152,7 @@ struct sem_undo {
struct list_head list_id; /* per semaphore array list:
* all undos for one array */
int semid; /* semaphore set identifier */
- short *semadj; /* array of adjustments */
+ short semadj[]; /* array of adjustments */
/* one per semaphore */
};
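
The semadj change above converts a manually placed pointer into a C99 flexible array member. A minimal sketch of the two allocation styles, using the names from this patch (the old pointer fixup is removed further down, in find_alloc_undo()):

	/* old: one allocation, pointer aimed just past the header */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short) * nsems, GFP_KERNEL);
	new->semadj = (short *)&new[1];	/* fragile manual layout math */

	/* new: flexible array member with overflow-checked sizing */
	new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
	/* new->semadj[i] is addressable directly, no fixup needed */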
@@ -122,7 +160,7 @@ struct sem_undo {
 * that may be shared among all tasks in a CLONE_SYSVSEM task group.
*/
struct sem_undo_list {
- atomic_t refcnt;
+ refcount_t refcnt;
spinlock_t lock;
struct list_head list_proc;
};
@@ -130,8 +168,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
-
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
@@ -169,15 +205,40 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
*
* Memory ordering:
* Most ordering is enforced by using spin_lock() and spin_unlock().
- * The special case is use_global_lock:
+ *
+ * Exceptions:
+ * 1) use_global_lock: (SEM_BARRIER_1)
* Setting it from non-zero to 0 is a RELEASE, this is ensured by
- * using smp_store_release().
+ * using smp_store_release(): Immediately after setting it to 0,
+ * a simple op can start.
* Testing if it is non-zero is an ACQUIRE, this is ensured by using
* smp_load_acquire().
 * Setting it from 0 to non-zero must be ordered with regard to
 * this smp_load_acquire(); this is guaranteed because the smp_load_acquire()
 * is inside a spin_lock() and after a write from 0 to non-zero a
 * spin_lock()+spin_unlock() is done.
+ * To prevent the compiler/cpu from temporarily writing 0 to use_global_lock,
+ * READ_ONCE()/WRITE_ONCE() is used.
+ *
+ * 2) queue.status: (SEM_BARRIER_2)
+ * Initialization is done while holding sem_lock(), so no further barrier is
+ * required.
+ * Setting it to a result code is a RELEASE, this is ensured by both a
+ * smp_store_release() (for case a) and while holding sem_lock()
+ * (for case b).
+ * The ACQUIRE when reading the result code without holding sem_lock() is
+ * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
+ * (case a above).
+ * Reading the result code while holding sem_lock() needs no further barriers,
+ * the locks inside sem_lock() enforce ordering (case b above)
+ *
+ * 3) current->state:
+ * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
+ * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
+ * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
+ * when holding sem_lock(), no further barriers are required.
+ *
+ * See also ipc/mqueue.c for more details on the covered races.
*/
#define sc_semmsl sem_ctls[0]
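
Condensed into code, the SEM_BARRIER_1 pairing described above looks roughly like this (simplified from complexmode_tryleave() and the sem_lock() fast path further down):

	/* complex-op side: all prior writes must be visible first */
	smp_store_release(&sma->use_global_lock, 0);

	/* simple-op side: take the per-semaphore lock, then re-check */
	spin_lock(&sem->lock);
	if (!smp_load_acquire(&sma->use_global_lock))
		return sops->sem_num;	/* fast path, no global lock */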
@@ -200,6 +261,7 @@ void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
+ rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
#endif
@@ -263,7 +325,7 @@ static void sem_rcu_free(struct rcu_head *head)
struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
- security_sem_free(sma);
+ security_sem_free(&sma->sem_perm);
kvfree(sma);
}
@@ -282,10 +344,10 @@ static void complexmode_enter(struct sem_array *sma)
* Nothing to do, just reset the
* counter until we return to simple mode.
*/
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
return;
}
- sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+ WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
for (i = 0; i < sma->sem_nsems; i++) {
sem = &sma->sems[i];
@@ -307,15 +369,12 @@ static void complexmode_tryleave(struct sem_array *sma)
return;
}
if (sma->use_global_lock == 1) {
- /*
- * Immediately after setting use_global_lock to 0,
- * a simple op can start. Thus: all memory writes
- * performed by the current operation must be visible
- * before we set use_global_lock to 0.
- */
+
+ /* See SEM_BARRIER_1 for purpose/pairing */
smp_store_release(&sma->use_global_lock, 0);
} else {
- sma->use_global_lock--;
+ WRITE_ONCE(sma->use_global_lock,
+ sma->use_global_lock-1);
}
}
@@ -331,6 +390,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
int nsops)
{
struct sem *sem;
+ int idx;
if (nsops != 1) {
/* Complex operation - acquire a full lock */
@@ -348,20 +408,21 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
*
 * Both facts are tracked by use_global_lock.
*/
- sem = &sma->sems[sops->sem_num];
+ idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
+ sem = &sma->sems[idx];
/*
* Initial check for use_global_lock. Just an optimization,
* no locking, no memory barrier.
*/
- if (!sma->use_global_lock) {
+ if (!READ_ONCE(sma->use_global_lock)) {
/*
* It appears that no complex operation is around.
* Acquire the per-semaphore lock.
*/
spin_lock(&sem->lock);
- /* pairs with smp_store_release() */
+ /* see SEM_BARRIER_1 for purpose/pairing */
if (!smp_load_acquire(&sma->use_global_lock)) {
/* fast path successful! */
return sops->sem_num;
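
array_index_nospec() (from <linux/nospec.h>, added to the includes above) clamps a user-controlled index after the architectural bounds check, so the CPU cannot use an out-of-range value during speculative execution (Spectre v1). A condensed sketch of the idiom; in this file the actual range check happens earlier, in do_semtimedop():

	if (sops->sem_num >= sma->sem_nsems)
		return -EFBIG;		/* architectural bounds check */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];		/* safe even under speculation */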
@@ -449,18 +510,14 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
static struct sem_array *sem_alloc(size_t nsems)
{
struct sem_array *sma;
- size_t size;
if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
return NULL;
- size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
- sma = kvmalloc(size, GFP_KERNEL);
+ sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
if (unlikely(!sma))
return NULL;
- memset(sma, 0, size);
-
return sma;
}
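
Two cleanups meet in sem_alloc(): kvzalloc() replaces the kvmalloc()+memset() pair, and struct_size() (from <linux/overflow.h>) sizes the flexible sems[] array with saturation on overflow. GFP_KERNEL_ACCOUNT additionally charges the allocation to the caller's memory cgroup. The helper is roughly equivalent to the old open-coded computation:

	/* struct_size(sma, sems, nsems) is an overflow-checked form of: */
	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
	sma = kvzalloc(size, GFP_KERNEL_ACCOUNT);	/* zeroed on return */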
@@ -493,7 +550,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
- retval = security_sem_alloc(sma);
+ retval = security_sem_alloc(&sma->sem_perm);
if (retval) {
kvfree(sma);
return retval;
@@ -511,11 +568,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
INIT_LIST_HEAD(&sma->pending_const);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
- sma->sem_ctime = get_seconds();
+ sma->sem_ctime = ktime_get_real_seconds();
+ /* ipc_addid() locks sma upon success. */
retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (retval < 0) {
- call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return retval;
}
ns->used_sems += nsems;
@@ -530,19 +588,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
/*
* Called with sem_ids.rwsem and ipcp locked.
*/
-static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
-{
- struct sem_array *sma;
-
- sma = container_of(ipcp, struct sem_array, sem_perm);
- return security_sem_associate(sma, semflg);
-}
-
-/*
- * Called with sem_ids.rwsem and ipcp locked.
- */
-static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
- struct ipc_params *params)
+static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
struct sem_array *sma;
@@ -553,12 +599,12 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
return 0;
}
-SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+long ksys_semget(key_t key, int nsems, int semflg)
{
struct ipc_namespace *ns;
static const struct ipc_ops sem_ops = {
.getnew = newary,
- .associate = sem_security,
+ .associate = security_sem_associate,
.more_checks = sem_more_checks,
};
struct ipc_params sem_params;
@@ -575,6 +621,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
+SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+{
+ return ksys_semget(key, nsems, semflg);
+}
+
/**
* perform_atomic_semop[_slow] - Attempt to perform semaphore
* operations on a given array.
@@ -594,7 +645,8 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
*/
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
- int result, sem_op, nsops, pid;
+ int result, sem_op, nsops;
+ struct pid *pid;
struct sembuf *sop;
struct sem *curr;
struct sembuf *sops;
@@ -605,7 +657,8 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
- curr = &sma->sems[sop->sem_num];
+ int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
+ curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
@@ -632,7 +685,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
sop--;
pid = q->pid;
while (sop >= sops) {
- sma->sems[sop->sem_num].sempid = pid;
+ ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
sop--;
}
@@ -685,7 +738,9 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
* until the operations can go through.
*/
for (sop = sops; sop < sops + nsops; sop++) {
- curr = &sma->sems[sop->sem_num];
+ int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
+
+ curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
@@ -711,7 +766,6 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
for (sop = sops; sop < sops + nsops; sop++) {
curr = &sma->sems[sop->sem_num];
sem_op = sop->sem_op;
- result = curr->semval;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
@@ -719,7 +773,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
un->semadj[sop->sem_num] = undo;
}
curr->semval += sem_op;
- curr->sempid = q->pid;
+ ipc_update_pid(&curr->sempid, q->pid);
}
return 0;
@@ -732,15 +786,14 @@ would_block:
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
struct wake_q_head *wake_q)
{
- wake_q_add(wake_q, q->sleeper);
- /*
- * Rely on the above implicit barrier, such that we can
- * ensure that we hold reference to the task before setting
- * q->status. Otherwise we could race with do_exit if the
- * task is awoken by an external event before calling
- * wake_up_process().
- */
- WRITE_ONCE(q->status, error);
+ struct task_struct *sleeper;
+
+ sleeper = get_task_struct(q->sleeper);
+
+ /* see SEM_BARRIER_2 for purpose/pairing */
+ smp_store_release(&q->status, error);
+
+ wake_q_add_safe(wake_q, sleeper);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
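
The rewritten helper pins the sleeper first, publishes the result with release semantics, and only then queues the wakeup; wake_q_add_safe() consumes the reference taken by get_task_struct(), closing the race with a concurrently exiting task. The matching sleeper side (SEM_BARRIER_2, case a) is, schematically:

	/* sleeper: lockless status read after waking up */
	error = READ_ONCE(queue.status);
	if (error != -EINTR) {
		/* ACQUIRE via control dependency + barrier */
		smp_acquire__after_ctrl_dep();
		/* the semop result is now stable and can be returned */
	}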
@@ -772,7 +825,7 @@ static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
/* It is impossible that someone waits for the new value:
* - complex operations always restart.
- * - wait-for-zero are handled seperately.
+ * - wait-for-zero are handled separately.
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
@@ -952,10 +1005,10 @@ again:
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
if (sops == NULL) {
- sma->sems[0].sem_otime = get_seconds();
+ sma->sems[0].sem_otime = ktime_get_real_seconds();
} else {
sma->sems[sops[0].sem_num].sem_otime =
- get_seconds();
+ ktime_get_real_seconds();
}
}
@@ -997,7 +1050,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
* - No complex ops, thus all sleeping ops are
* decrease.
* - if we decreased the value, then any sleeping
- * semaphore ops wont be able to run: If the
+ * semaphore ops won't be able to run: If the
* previous value was too small, then the new
* value will be too small, too.
*/
@@ -1103,7 +1156,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
/* Wake up all pending processes and let them fail with EIDRM. */
@@ -1126,6 +1179,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
+ ipc_update_pid(&sem->sempid, NULL);
}
/* Remove the semaphore set from the IDR */
@@ -1162,14 +1216,14 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
}
}
-static time_t get_semotime(struct sem_array *sma)
+static time64_t get_semotime(struct sem_array *sma)
{
int i;
- time_t res;
+ time64_t res;
res = sma->sems[0].sem_otime;
for (i = 1; i < sma->sem_nsems; i++) {
- time_t to = sma->sems[i].sem_otime;
+ time64_t to = sma->sems[i].sem_otime;
if (to > res)
res = to;
@@ -1177,112 +1231,124 @@ static time_t get_semotime(struct sem_array *sma)
return res;
}
-static int semctl_nolock(struct ipc_namespace *ns, int semid,
- int cmd, int version, void __user *p)
+static int semctl_stat(struct ipc_namespace *ns, int semid,
+ int cmd, struct semid64_ds *semid64)
{
- int err;
struct sem_array *sma;
+ time64_t semotime;
+ int err;
- switch (cmd) {
- case IPC_INFO:
- case SEM_INFO:
- {
- struct seminfo seminfo;
- int max_id;
-
- err = security_sem_semctl(NULL, cmd);
- if (err)
- return err;
+ memset(semid64, 0, sizeof(*semid64));
- memset(&seminfo, 0, sizeof(seminfo));
- seminfo.semmni = ns->sc_semmni;
- seminfo.semmns = ns->sc_semmns;
- seminfo.semmsl = ns->sc_semmsl;
- seminfo.semopm = ns->sc_semopm;
- seminfo.semvmx = SEMVMX;
- seminfo.semmnu = SEMMNU;
- seminfo.semmap = SEMMAP;
- seminfo.semume = SEMUME;
- down_read(&sem_ids(ns).rwsem);
- if (cmd == SEM_INFO) {
- seminfo.semusz = sem_ids(ns).in_use;
- seminfo.semaem = ns->used_sems;
- } else {
- seminfo.semusz = SEMUSZ;
- seminfo.semaem = SEMAEM;
+ rcu_read_lock();
+ if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
+ sma = sem_obtain_object(ns, semid);
+ if (IS_ERR(sma)) {
+ err = PTR_ERR(sma);
+ goto out_unlock;
}
- max_id = ipc_get_maxid(&sem_ids(ns));
- up_read(&sem_ids(ns).rwsem);
- if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
- return -EFAULT;
- return (max_id < 0) ? 0 : max_id;
- }
- case IPC_STAT:
- case SEM_STAT:
- {
- struct semid64_ds tbuf;
- int id = 0;
-
- memset(&tbuf, 0, sizeof(tbuf));
-
- rcu_read_lock();
- if (cmd == SEM_STAT) {
- sma = sem_obtain_object(ns, semid);
- if (IS_ERR(sma)) {
- err = PTR_ERR(sma);
- goto out_unlock;
- }
- id = sma->sem_perm.id;
- } else {
- sma = sem_obtain_object_check(ns, semid);
- if (IS_ERR(sma)) {
- err = PTR_ERR(sma);
- goto out_unlock;
- }
+ } else { /* IPC_STAT */
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ err = PTR_ERR(sma);
+ goto out_unlock;
}
+ }
+ /* see comment for SHM_STAT_ANY */
+ if (cmd == SEM_STAT_ANY)
+ audit_ipc_obj(&sma->sem_perm);
+ else {
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
goto out_unlock;
+ }
- err = security_sem_semctl(sma, cmd);
- if (err)
- goto out_unlock;
+ err = security_sem_semctl(&sma->sem_perm, cmd);
+ if (err)
+ goto out_unlock;
- kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
- tbuf.sem_otime = get_semotime(sma);
- tbuf.sem_ctime = sma->sem_ctime;
- tbuf.sem_nsems = sma->sem_nsems;
- rcu_read_unlock();
- if (copy_semid_to_user(p, &tbuf, version))
- return -EFAULT;
- return id;
+ ipc_lock_object(&sma->sem_perm);
+
+ if (!ipc_valid_object(&sma->sem_perm)) {
+ ipc_unlock_object(&sma->sem_perm);
+ err = -EIDRM;
+ goto out_unlock;
}
- default:
- return -EINVAL;
+
+ kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
+ semotime = get_semotime(sma);
+ semid64->sem_otime = semotime;
+ semid64->sem_ctime = sma->sem_ctime;
+#ifndef CONFIG_64BIT
+ semid64->sem_otime_high = semotime >> 32;
+ semid64->sem_ctime_high = sma->sem_ctime >> 32;
+#endif
+ semid64->sem_nsems = sma->sem_nsems;
+
+ if (cmd == IPC_STAT) {
+ /*
+ * As defined in SUS:
+ * Return 0 on success
+ */
+ err = 0;
+ } else {
+ /*
+ * SEM_STAT and SEM_STAT_ANY (both Linux specific)
+ * Return the full id, including the sequence number
+ */
+ err = sma->sem_perm.id;
}
+ ipc_unlock_object(&sma->sem_perm);
out_unlock:
rcu_read_unlock();
return err;
}
+static int semctl_info(struct ipc_namespace *ns, int semid,
+ int cmd, void __user *p)
+{
+ struct seminfo seminfo;
+ int max_idx;
+ int err;
+
+ err = security_sem_semctl(NULL, cmd);
+ if (err)
+ return err;
+
+ memset(&seminfo, 0, sizeof(seminfo));
+ seminfo.semmni = ns->sc_semmni;
+ seminfo.semmns = ns->sc_semmns;
+ seminfo.semmsl = ns->sc_semmsl;
+ seminfo.semopm = ns->sc_semopm;
+ seminfo.semvmx = SEMVMX;
+ seminfo.semmnu = SEMMNU;
+ seminfo.semmap = SEMMAP;
+ seminfo.semume = SEMUME;
+ down_read(&sem_ids(ns).rwsem);
+ if (cmd == SEM_INFO) {
+ seminfo.semusz = sem_ids(ns).in_use;
+ seminfo.semaem = ns->used_sems;
+ } else {
+ seminfo.semusz = SEMUSZ;
+ seminfo.semaem = SEMAEM;
+ }
+ max_idx = ipc_get_maxidx(&sem_ids(ns));
+ up_read(&sem_ids(ns).rwsem);
+ if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
+ return -EFAULT;
+ return (max_idx < 0) ? 0 : max_idx;
+}
+
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
- unsigned long arg)
+ int val)
{
struct sem_undo *un;
struct sem_array *sma;
struct sem *curr;
- int err, val;
+ int err;
DEFINE_WAKE_Q(wake_q);
-#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
- /* big-endian 64bit */
- val = arg >> 32;
-#else
- /* 32bit or little-endian 64bit */
- val = arg;
-#endif
-
if (val > SEMVMX || val < 0)
return -ERANGE;
@@ -1304,7 +1370,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
return -EACCES;
}
- err = security_sem_semctl(sma, SETVAL);
+ err = security_sem_semctl(&sma->sem_perm, SETVAL);
if (err) {
rcu_read_unlock();
return -EACCES;
@@ -1318,6 +1384,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
return -EIDRM;
}
+ semnum = array_index_nospec(semnum, sma->sem_nsems);
curr = &sma->sems[semnum];
ipc_assert_locked_object(&sma->sem_perm);
@@ -1325,8 +1392,8 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
un->semadj[semnum] = 0;
curr->semval = val;
- curr->sempid = task_tgid_vnr(current);
- sma->sem_ctime = get_seconds();
+ ipc_update_pid(&curr->sempid, task_tgid(current));
+ sma->sem_ctime = ktime_get_real_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &wake_q);
sem_unlock(sma, -1);
@@ -1358,11 +1425,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
goto out_rcu_wakeup;
- err = security_sem_semctl(sma, cmd);
+ err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_rcu_wakeup;
- err = -EACCES;
switch (cmd) {
case GETALL:
{
@@ -1446,7 +1512,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
for (i = 0; i < nsems; i++) {
sma->sems[i].semval = sem_io[i];
- sma->sems[i].sempid = task_tgid_vnr(current);
+ ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
}
ipc_assert_locked_object(&sma->sem_perm);
@@ -1454,7 +1520,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
for (i = 0; i < nsems; i++)
un->semadj[i] = 0;
}
- sma->sem_ctime = get_seconds();
+ sma->sem_ctime = ktime_get_real_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &wake_q);
err = 0;
@@ -1471,6 +1537,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = -EIDRM;
goto out_unlock;
}
+
+ semnum = array_index_nospec(semnum, nsems);
curr = &sma->sems[semnum];
switch (cmd) {
@@ -1478,7 +1546,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = curr->semval;
goto out_unlock;
case GETPID:
- err = curr->sempid;
+ err = pid_vnr(curr->sempid);
goto out_unlock;
case GETNCNT:
err = count_semcnt(sma, semnum, 0);
@@ -1531,23 +1599,17 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
* NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int semctl_down(struct ipc_namespace *ns, int semid,
- int cmd, int version, void __user *p)
+ int cmd, struct semid64_ds *semid64)
{
struct sem_array *sma;
int err;
- struct semid64_ds semid64;
struct kern_ipc_perm *ipcp;
- if (cmd == IPC_SET) {
- if (copy_semid_from_user(&semid64, p, version))
- return -EFAULT;
- }
-
down_write(&sem_ids(ns).rwsem);
rcu_read_lock();
- ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
- &semid64.sem_perm, 0);
+ ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
+ &semid64->sem_perm, 0);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_unlock1;
@@ -1555,7 +1617,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
sma = container_of(ipcp, struct sem_array, sem_perm);
- err = security_sem_semctl(sma, cmd);
+ err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_unlock1;
@@ -1567,10 +1629,10 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
goto out_up;
case IPC_SET:
sem_lock(sma, NULL, -1);
- err = ipc_update_perm(&semid64.sem_perm, ipcp);
+ err = ipc_update_perm(&semid64->sem_perm, ipcp);
if (err)
goto out_unlock0;
- sma->sem_ctime = get_seconds();
+ sma->sem_ctime = ktime_get_real_seconds();
break;
default:
err = -EINVAL;
@@ -1586,24 +1648,31 @@ out_up:
return err;
}
-SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
- int version;
struct ipc_namespace *ns;
void __user *p = (void __user *)arg;
+ struct semid64_ds semid64;
+ int err;
if (semid < 0)
return -EINVAL;
- version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case SEM_INFO:
+ return semctl_info(ns, semid, cmd, p);
case IPC_STAT:
case SEM_STAT:
- return semctl_nolock(ns, semid, cmd, version, p);
+ case SEM_STAT_ANY:
+ err = semctl_stat(ns, semid, cmd, &semid64);
+ if (err < 0)
+ return err;
+ if (copy_semid_to_user(p, &semid64, version))
+ err = -EFAULT;
+ return err;
case GETALL:
case GETVAL:
case GETPID:
@@ -1611,16 +1680,162 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
case GETZCNT:
case SETALL:
return semctl_main(ns, semid, semnum, cmd, p);
+ case SETVAL: {
+ int val;
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ /* big-endian 64bit */
+ val = arg >> 32;
+#else
+ /* 32bit or little-endian 64bit */
+ val = arg;
+#endif
+ return semctl_setval(ns, semid, semnum, val);
+ }
+ case IPC_SET:
+ if (copy_semid_from_user(&semid64, p, version))
+ return -EFAULT;
+ fallthrough;
+ case IPC_RMID:
+ return semctl_down(ns, semid, cmd, &semid64);
+ default:
+ return -EINVAL;
+ }
+}
+
+SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+ return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
+{
+ int version = ipc_parse_version(&cmd);
+
+ return ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+ return ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
+
+#ifdef CONFIG_COMPAT
+
+struct compat_semid_ds {
+ struct compat_ipc_perm sem_perm;
+ old_time32_t sem_otime;
+ old_time32_t sem_ctime;
+ compat_uptr_t sem_base;
+ compat_uptr_t sem_pending;
+ compat_uptr_t sem_pending_last;
+ compat_uptr_t undo;
+ unsigned short sem_nsems;
+};
+
+static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
+ int version)
+{
+ memset(out, 0, sizeof(*out));
+ if (version == IPC_64) {
+ struct compat_semid64_ds __user *p = buf;
+ return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
+ } else {
+ struct compat_semid_ds __user *p = buf;
+ return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
+ }
+}
+
+static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
+ int version)
+{
+ if (version == IPC_64) {
+ struct compat_semid64_ds v;
+ memset(&v, 0, sizeof(v));
+ to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
+ v.sem_otime = lower_32_bits(in->sem_otime);
+ v.sem_otime_high = upper_32_bits(in->sem_otime);
+ v.sem_ctime = lower_32_bits(in->sem_ctime);
+ v.sem_ctime_high = upper_32_bits(in->sem_ctime);
+ v.sem_nsems = in->sem_nsems;
+ return copy_to_user(buf, &v, sizeof(v));
+ } else {
+ struct compat_semid_ds v;
+ memset(&v, 0, sizeof(v));
+ to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
+ v.sem_otime = in->sem_otime;
+ v.sem_ctime = in->sem_ctime;
+ v.sem_nsems = in->sem_nsems;
+ return copy_to_user(buf, &v, sizeof(v));
+ }
+}
+
+static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
+{
+ void __user *p = compat_ptr(arg);
+ struct ipc_namespace *ns;
+ struct semid64_ds semid64;
+ int err;
+
+ ns = current->nsproxy->ipc_ns;
+
+ if (semid < 0)
+ return -EINVAL;
+
+ switch (cmd & (~IPC_64)) {
+ case IPC_INFO:
+ case SEM_INFO:
+ return semctl_info(ns, semid, cmd, p);
+ case IPC_STAT:
+ case SEM_STAT:
+ case SEM_STAT_ANY:
+ err = semctl_stat(ns, semid, cmd, &semid64);
+ if (err < 0)
+ return err;
+ if (copy_compat_semid_to_user(p, &semid64, version))
+ err = -EFAULT;
+ return err;
+ case GETVAL:
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETALL:
+ case SETALL:
+ return semctl_main(ns, semid, semnum, cmd, p);
case SETVAL:
return semctl_setval(ns, semid, semnum, arg);
- case IPC_RMID:
case IPC_SET:
- return semctl_down(ns, semid, cmd, version, p);
+ if (copy_compat_semid_from_user(&semid64, p, version))
+ return -EFAULT;
+ fallthrough;
+ case IPC_RMID:
+ return semctl_down(ns, semid, cmd, &semid64);
default:
return -EINVAL;
}
}
+COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+ return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
+{
+ int version = compat_ipc_parse_version(&cmd);
+
+ return compat_ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+ return compat_ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
+#endif
+
/* If the task doesn't already have an undo_list, then allocate one
* here. We guarantee there is only one thread using this undo list,
* and current is THE ONE
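
On 32-bit ABIs the 64-bit second counters no longer fit the legacy timestamp fields, so copy_compat_semid_to_user() above splits each value into low/high halves; the consumer recombines them. An illustrative round trip (the recombination shown is what a C library or application would do, not code from this patch):

	/* kernel -> user: split a time64_t across two 32-bit fields */
	v.sem_otime      = lower_32_bits(in->sem_otime);
	v.sem_otime_high = upper_32_bits(in->sem_otime);

	/* consumer side: recombine into a full 64-bit value */
	int64_t otime = ((int64_t)v.sem_otime_high << 32) | (uint32_t)v.sem_otime;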
@@ -1638,11 +1853,11 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
- undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+ undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
- atomic_set(&undo_list->refcnt, 1);
+ refcount_set(&undo_list->refcnt, 1);
INIT_LIST_HEAD(&undo_list->list_proc);
current->sysvsem.undo_list = undo_list;
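
The refcnt switch from atomic_t to refcount_t is API-compatible for this usage (refcount_set/refcount_inc/refcount_dec_and_test, used below) but gains saturation plus warnings on overflow and underflow, turning a potential use-after-free into a leak with a WARN. The object's lifecycle in this file, condensed:

	refcount_set(&undo_list->refcnt, 1);      /* get_undo_list(): creation */
	refcount_inc(&undo_list->refcnt);         /* copy_semundo(): CLONE_SYSVSEM */
	if (!refcount_dec_and_test(&ulp->refcnt)) /* exit_sem(): drop reference */
		return;                           /* other tasks still share it */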
@@ -1655,7 +1870,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
- list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
+ spin_is_locked(&ulp->lock)) {
if (un->semid == semid)
return un;
}
@@ -1722,7 +1938,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
rcu_read_unlock();
/* step 2: allocate new undo structure */
- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
if (!new) {
ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
return ERR_PTR(-ENOMEM);
@@ -1734,7 +1950,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
if (!ipc_valid_object(&sma->sem_perm)) {
sem_unlock(sma, -1);
rcu_read_unlock();
- kfree(new);
+ kvfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
@@ -1745,11 +1961,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
- kfree(new);
+ spin_unlock(&ulp->lock);
+ kvfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
- new->semadj = (short *) &new[1];
new->ulp = ulp;
new->semid = semid;
assert_spin_locked(&ulp->lock);
@@ -1757,59 +1973,42 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
ipc_assert_locked_object(&sma->sem_perm);
list_add(&new->list_id, &sma->list_id);
un = new;
-
-success:
spin_unlock(&ulp->lock);
+success:
sem_unlock(sma, -1);
out:
return un;
}
-SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
- unsigned, nsops, const struct timespec __user *, timeout)
+long __do_semtimedop(int semid, struct sembuf *sops,
+ unsigned nsops, const struct timespec64 *timeout,
+ struct ipc_namespace *ns)
{
int error = -EINVAL;
struct sem_array *sma;
- struct sembuf fast_sops[SEMOPM_FAST];
- struct sembuf *sops = fast_sops, *sop;
+ struct sembuf *sop;
struct sem_undo *un;
int max, locknum;
bool undos = false, alter = false, dupsop = false;
struct sem_queue queue;
- unsigned long dup = 0, jiffies_left = 0;
- struct ipc_namespace *ns;
-
- ns = current->nsproxy->ipc_ns;
+ unsigned long dup = 0;
+ ktime_t expires, *exp = NULL;
+ bool timed_out = false;
if (nsops < 1 || semid < 0)
return -EINVAL;
if (nsops > ns->sc_semopm)
return -E2BIG;
- if (nsops > SEMOPM_FAST) {
- sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
- if (sops == NULL)
- return -ENOMEM;
- }
-
- if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
- error = -EFAULT;
- goto out_free;
- }
if (timeout) {
- struct timespec _timeout;
- if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
- error = -EFAULT;
- goto out_free;
- }
- if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
- _timeout.tv_nsec >= 1000000000L) {
- error = -EINVAL;
- goto out_free;
- }
- jiffies_left = timespec_to_jiffies(&_timeout);
+ if (!timespec64_valid(timeout))
+ return -EINVAL;
+ expires = ktime_add_safe(ktime_get(),
+ timespec64_to_ktime(*timeout));
+ exp = &expires;
}
+
max = 0;
for (sop = sops; sop < sops + nsops; sop++) {
unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
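
The mask above feeds a cheap duplicate-detection filter: each sem_num hashes onto one of BITS_PER_LONG bits, and hitting an already-set bit flags the batch as possibly containing duplicate semaphores (dupsop), which forces the exact but slower update path. False positives only cost speed, never correctness. In outline (only altering operations are recorded in dup):

	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (dup & mask)
			dupsop = true;	/* same bit hit twice: maybe a dup */
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}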
@@ -1838,7 +2037,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
- goto out_free;
+ goto out;
}
} else {
un = NULL;
@@ -1849,25 +2048,25 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
if (IS_ERR(sma)) {
rcu_read_unlock();
error = PTR_ERR(sma);
- goto out_free;
+ goto out;
}
error = -EFBIG;
if (max >= sma->sem_nsems) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
error = -EACCES;
if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
- error = security_sem_semop(sma, sops, nsops, alter);
+ error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
if (error) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
error = -EIDRM;
@@ -1881,7 +2080,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* entangled here and why it's RMID race safe on comments at sem_lock()
*/
if (!ipc_valid_object(&sma->sem_perm))
- goto out_unlock_free;
+ goto out_unlock;
/*
* semid identifiers are not unique - find_alloc_undo may have
* allocated an undo structure, it was invalidated by an RMID
@@ -1890,17 +2089,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
if (un && un->semid == -1)
- goto out_unlock_free;
+ goto out_unlock;
queue.sops = sops;
queue.nsops = nsops;
queue.undo = un;
- queue.pid = task_tgid_vnr(current);
+ queue.pid = task_tgid(current);
queue.alter = alter;
queue.dupsop = dupsop;
error = perform_atomic_semop(sma, &queue);
- if (error == 0) { /* non-blocking succesfull path */
+ if (error == 0) { /* non-blocking successful path */
DEFINE_WAKE_Q(wake_q);
/*
@@ -1916,10 +2115,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
rcu_read_unlock();
wake_up_q(&wake_q);
- goto out_free;
+ goto out;
}
if (error < 0) /* non-blocking error path */
- goto out_unlock_free;
+ goto out_unlock;
/*
* We need to sleep on this operation, so we put the current
@@ -1927,7 +2126,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
*/
if (nsops == 1) {
struct sem *curr;
- curr = &sma->sems[sops->sem_num];
+ int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
+ curr = &sma->sems[idx];
if (alter) {
if (sma->complex_count) {
@@ -1954,17 +2154,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
}
do {
- queue.status = -EINTR;
+ /* memory ordering ensured by the lock in sem_lock() */
+ WRITE_ONCE(queue.status, -EINTR);
queue.sleeper = current;
+ /* memory ordering is ensured by the lock in sem_lock() */
__set_current_state(TASK_INTERRUPTIBLE);
sem_unlock(sma, locknum);
rcu_read_unlock();
- if (timeout)
- jiffies_left = schedule_timeout(jiffies_left);
- else
- schedule();
+ timed_out = !schedule_hrtimeout_range(exp,
+ current->timer_slack_ns, HRTIMER_MODE_ABS);
/*
* fastpath: the semop has completed, either successfully or
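
The timeout handling moves from relative jiffies bookkeeping to one absolute hrtimer deadline: ktime_add_safe() (which saturates rather than overflows) computes the expiry once, and every sleep in the retry loop rearms against that fixed point, so spurious wakeups need no remaining-time arithmetic. In outline:

	ktime_t expires, *exp = NULL;

	if (timeout) {
		expires = ktime_add_safe(ktime_get(),
					 timespec64_to_ktime(*timeout));
		exp = &expires;		/* fixed absolute deadline */
	}
	/* in the sleep loop; returns 0 only when the deadline passed */
	timed_out = !schedule_hrtimeout_range(exp,
			current->timer_slack_ns, HRTIMER_MODE_ABS);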
@@ -1977,24 +2177,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* scenarios where we were awakened externally, during the
* window between wake_q_add() and wake_up_q().
*/
+ rcu_read_lock();
error = READ_ONCE(queue.status);
if (error != -EINTR) {
- /*
- * User space could assume that semop() is a memory
- * barrier: Without the mb(), the cpu could
- * speculatively read in userspace stale data that was
- * overwritten by the previous owner of the semaphore.
- */
- smp_mb();
- goto out_free;
+ /* see SEM_BARRIER_2 for purpose/pairing */
+ smp_acquire__after_ctrl_dep();
+ rcu_read_unlock();
+ goto out;
}
- rcu_read_lock();
locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
- goto out_unlock_free;
+ goto out_unlock;
+ /*
+	 * No necessity for any barrier: we are protected by sem_lock().
+ */
error = READ_ONCE(queue.status);
/*
@@ -2002,37 +2201,109 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* Leave without unlink_queue(), but with sem_unlock().
*/
if (error != -EINTR)
- goto out_unlock_free;
+ goto out_unlock;
/*
* If an interrupt occurred we have to clean up the queue.
*/
- if (timeout && jiffies_left == 0)
+ if (timed_out)
error = -EAGAIN;
} while (error == -EINTR && !signal_pending(current)); /* spurious */
unlink_queue(sma, &queue);
-out_unlock_free:
+out_unlock:
sem_unlock(sma, locknum);
rcu_read_unlock();
+out:
+ return error;
+}
+
+static long do_semtimedop(int semid, struct sembuf __user *tsops,
+ unsigned nsops, const struct timespec64 *timeout)
+{
+ struct sembuf fast_sops[SEMOPM_FAST];
+ struct sembuf *sops = fast_sops;
+ struct ipc_namespace *ns;
+ int ret;
+
+ ns = current->nsproxy->ipc_ns;
+ if (nsops > ns->sc_semopm)
+ return -E2BIG;
+ if (nsops < 1)
+ return -EINVAL;
+
+ if (nsops > SEMOPM_FAST) {
+ sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+ if (sops == NULL)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
+
out_free:
if (sops != fast_sops)
- kfree(sops);
- return error;
+ kvfree(sops);
+
+ return ret;
+}
+
+long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+ unsigned int nsops, const struct __kernel_timespec __user *timeout)
+{
+ if (timeout) {
+ struct timespec64 ts;
+ if (get_timespec64(&ts, timeout))
+ return -EFAULT;
+ return do_semtimedop(semid, tsops, nsops, &ts);
+ }
+ return do_semtimedop(semid, tsops, nsops, NULL);
}
+SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+ unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
+{
+ return ksys_semtimedop(semid, tsops, nsops, timeout);
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+ unsigned int nsops,
+ const struct old_timespec32 __user *timeout)
+{
+ if (timeout) {
+ struct timespec64 ts;
+ if (get_old_timespec32(&ts, timeout))
+ return -EFAULT;
+ return do_semtimedop(semid, tsems, nsops, &ts);
+ }
+ return do_semtimedop(semid, tsems, nsops, NULL);
+}
+
+SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
+ unsigned int, nsops,
+ const struct old_timespec32 __user *, timeout)
+{
+ return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
+}
+#endif
+
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops)
{
- return sys_semtimedop(semid, tsops, nsops, NULL);
+ return do_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
* parent and child tasks.
*/
-int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+int copy_semundo(u64 clone_flags, struct task_struct *tsk)
{
struct sem_undo_list *undo_list;
int error;
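
For reference, the semtimedop()/semop() entry points above serve the userspace call below (hypothetical minimal example using glibc's semtimedop() wrapper, which may require _GNU_SOURCE; error handling beyond the timeout case elided):

	#include <sys/sem.h>
	#include <errno.h>

	/* wait up to 1.5 s for semaphore 0 of the set to become >= 1 */
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500 * 1000 * 1000 };

	if (semtimedop(semid, &op, 1, &ts) == -1 && errno == EAGAIN)
		;	/* timed out without acquiring the semaphore */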
@@ -2041,7 +2312,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
error = get_undo_list(&undo_list);
if (error)
return error;
- atomic_inc(&undo_list->refcnt);
+ refcount_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
} else
tsk->sysvsem.undo_list = NULL;
@@ -2070,7 +2341,7 @@ void exit_sem(struct task_struct *tsk)
return;
tsk->sysvsem.undo_list = NULL;
- if (!atomic_dec_and_test(&ulp->refcnt))
+ if (!refcount_dec_and_test(&ulp->refcnt))
return;
for (;;) {
@@ -2091,7 +2362,8 @@ void exit_sem(struct task_struct *tsk)
* possibility where we exit while freeary() didn't
* finish unlocking sem_undo_list.
*/
- spin_unlock_wait(&ulp->lock);
+ spin_lock(&ulp->lock);
+ spin_unlock(&ulp->lock);
rcu_read_unlock();
break;
}
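
spin_unlock_wait() was removed from the kernel because its ordering guarantees were subtle and easy to misuse; the replacement is the plain lock/unlock pair seen above. By the time spin_lock() returns, any critical section that was still manipulating the undo list (freeary() here) must have completed:

	/* barrier idiom: synchronize with any in-flight lock holder */
	spin_lock(&ulp->lock);
	spin_unlock(&ulp->lock);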
@@ -2133,11 +2405,9 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- /* we are the last process using this ulp, acquiring ulp->lock
- * isn't required. Besides that, we are also protected against
- * IPC_RMID as we hold sma->sem_perm lock now
- */
+ spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
+ spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
@@ -2161,7 +2431,7 @@ void exit_sem(struct task_struct *tsk)
semaphore->semval = 0;
if (semaphore->semval > SEMVMX)
semaphore->semval = SEMVMX;
- semaphore->sempid = task_tgid_vnr(current);
+ ipc_update_pid(&semaphore->sempid, task_tgid(current));
}
}
/* maybe some queued-up processes were waiting for this */
@@ -2170,7 +2440,7 @@ void exit_sem(struct task_struct *tsk)
rcu_read_unlock();
wake_up_q(&wake_q);
- kfree_rcu(un, rcu);
+ kvfree_rcu(un, rcu);
}
kfree(ulp);
}
@@ -2181,11 +2451,12 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
struct user_namespace *user_ns = seq_user_ns(s);
struct kern_ipc_perm *ipcp = it;
struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
- time_t sem_otime;
+ time64_t sem_otime;
/*
* The proc interface isn't aware of sem_lock(), it calls
- * ipc_lock_object() directly (in sysvipc_find_ipc).
+ * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
+ * (in sysvipc_find_ipc)
* In order to stay compatible with sem_lock(), we must
* enter / leave complex_mode.
*/
@@ -2194,7 +2465,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
sem_otime = get_semotime(sma);
seq_printf(s,
- "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
+ "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,