Diffstat (limited to 'ipc/sem.c')
 -rw-r--r--  ipc/sem.c | 1869
 1 file changed, 1138 insertions(+), 731 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c index 41088899783d..0f06e4bd4673 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/sem.c * Copyright (C) 1992 Krishna Balasubramanian @@ -11,6 +12,7 @@ * (c) 2001 Red Hat Inc * Lockless wakeup * (c) 2003 Manfred Spraul <manfred@colorfullife.com> + * (c) 2016 Davidlohr Bueso <dave@stgolabs.net> * Further wakeup optimizations, documentation * (c) 2010 Manfred Spraul <manfred@colorfullife.com> * @@ -34,7 +36,7 @@ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. * - undo adjustments at process exit are limited to 0..SEMVMX. * - namespace are supported. - * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing + * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing * to /proc/sys/kernel/sem. * - statistics about the usage are reported in /proc/sysvipc/sem. * @@ -47,22 +49,17 @@ * Thus: Perfect SMP scaling between independent semaphore arrays. * If multiple semaphores in one array are used, then cache line * trashing on the semaphore array spinlock will limit the scaling. - * - semncnt and semzcnt are calculated on demand in count_semncnt() and - * count_semzcnt() + * - semncnt and semzcnt are calculated on demand in count_semcnt() * - the task that performs a successful semop() scans the list of all * sleeping tasks and completes any pending operations that can be fulfilled. * Semaphores are actively given to waiting tasks (necessary for FIFO). * (see update_queue()) * - To improve the scalability, the actual wake-up calls are performed after - * dropping all locks. (see wake_up_sem_queue_prepare(), - * wake_up_sem_queue_do()) + * dropping all locks. (see wake_up_sem_queue_prepare()) * - All work is done by the waker, the woken up task does not have to do * anything - not even acquiring a lock or dropping a refcount. * - A woken up task may not even touch the semaphore array anymore, it may * have been destroyed already by a semctl(RMID). - * - The synchronizations between wake-ups due to a timeout/signal and a - * wake-up due to a completed semaphore operation is achieved by using an - * intermediate state (IN_WAKEUP). * - UNDO values are stored in an array (one per process and per * semaphore array, lazily allocated). For backwards compatibility, multiple * modes for the UNDO variables are supported (per process, per thread) @@ -73,6 +70,7 @@ * The worst-case behavior is nevertheless O(N^2) for N wakeups. */ +#include <linux/compat.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/init.h> @@ -86,32 +84,60 @@ #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> +#include <linux/sched/wake_q.h> +#include <linux/nospec.h> +#include <linux/rhashtable.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" /* One semaphore structure for each semaphore in the system. */ struct sem { int semval; /* current value */ - int sempid; /* pid of last operation */ + /* + * PID of the process that last modified the semaphore. For + * Linux, specifically these are: + * - semop + * - semctl, via SETVAL and SETALL. + * - at task exit when performing undo adjustments (see exit_sem). 
+ */ + struct pid *sempid; spinlock_t lock; /* spinlock for fine-grained semtimedop */ struct list_head pending_alter; /* pending single-sop operations */ /* that alter the semaphore */ struct list_head pending_const; /* pending single-sop operations */ /* that do not alter the semaphore*/ - time_t sem_otime; /* candidate for sem_otime */ + time64_t sem_otime; /* candidate for sem_otime */ } ____cacheline_aligned_in_smp; +/* One sem_array data structure for each set of semaphores in the system. */ +struct sem_array { + struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ + time64_t sem_ctime; /* create/last semctl() time */ + struct list_head pending_alter; /* pending operations */ + /* that alter the array */ + struct list_head pending_const; /* pending complex operations */ + /* that do not alter semvals */ + struct list_head list_id; /* undo requests on this array */ + int sem_nsems; /* no. of semaphores in array */ + int complex_count; /* pending complex operations */ + unsigned int use_global_lock;/* >0: global lock required */ + + struct sem sems[]; +} __randomize_layout; + /* One queue for each sleeping process in the system. */ struct sem_queue { struct list_head list; /* queue of pending operations */ struct task_struct *sleeper; /* this process */ struct sem_undo *undo; /* undo structure */ - int pid; /* process id of requesting process */ + struct pid *pid; /* process id of requesting process */ int status; /* completion status of operation */ struct sembuf *sops; /* array of pending operations */ + struct sembuf *blocking; /* the operation that blocked */ int nsops; /* number of operations */ - int alter; /* does *sops alter the array? */ + bool alter; /* does *sops alter the array? */ + bool dupsop; /* sops on more than one sem_num */ }; /* Each task has a list of undo requests. They are executed automatically @@ -126,7 +152,7 @@ struct sem_undo { struct list_head list_id; /* per semaphore array list: * all undos for one array */ int semid; /* semaphore set identifier */ - short *semadj; /* array of adjustments */ + short semadj[]; /* array of adjustments */ /* one per semaphore */ }; @@ -134,7 +160,7 @@ struct sem_undo { * that may be shared among all a CLONE_SYSVSEM task group. */ struct sem_undo_list { - atomic_t refcnt; + refcount_t refcnt; spinlock_t lock; struct list_head list_proc; }; @@ -142,8 +168,6 @@ struct sem_undo_list { #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) -#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) - static int newary(struct ipc_namespace *, struct ipc_params *); static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); #ifdef CONFIG_PROC_FS @@ -154,15 +178,67 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* + * Switching from the mode suitable for simple ops + * to the mode for complex ops is costly. Therefore: + * use some hysteresis + */ +#define USE_GLOBAL_LOCK_HYSTERESIS 10 + +/* * Locking: + * a) global sem_lock() for read/write * sem_undo.id_next, * sem_array.complex_count, - * sem_array.pending{_alter,_cont}, - * sem_array.sem_undo: global sem_lock() for read/write - * sem_undo.proc_next: only "current" is allowed to read/write that field. 
- * - * sem_array.sem_base[i].pending_{const,alter}: - * global or semaphore sem_lock() for read/write + * sem_array.pending{_alter,_const}, + * sem_array.sem_undo + * + * b) global or semaphore sem_lock() for read/write: + * sem_array.sems[i].pending_{const,alter}: + * + * c) special: + * sem_undo_list.list_proc: + * * undo_list->lock for write + * * rcu for read + * use_global_lock: + * * global sem_lock() for write + * * either local or global sem_lock() for read. + * + * Memory ordering: + * Most ordering is enforced by using spin_lock() and spin_unlock(). + * + * Exceptions: + * 1) use_global_lock: (SEM_BARRIER_1) + * Setting it from non-zero to 0 is a RELEASE, this is ensured by + * using smp_store_release(): Immediately after setting it to 0, + * a simple op can start. + * Testing if it is non-zero is an ACQUIRE, this is ensured by using + * smp_load_acquire(). + * Setting it from 0 to non-zero must be ordered with regards to + * this smp_load_acquire(), this is guaranteed because the smp_load_acquire() + * is inside a spin_lock() and after a write from 0 to non-zero a + * spin_lock()+spin_unlock() is done. + * To prevent the compiler/cpu temporarily writing 0 to use_global_lock, + * READ_ONCE()/WRITE_ONCE() is used. + * + * 2) queue.status: (SEM_BARRIER_2) + * Initialization is done while holding sem_lock(), so no further barrier is + * required. + * Setting it to a result code is a RELEASE, this is ensured by both a + * smp_store_release() (for case a) and while holding sem_lock() + * (for case b). + * The ACQUIRE when reading the result code without holding sem_lock() is + * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep(). + * (case a above). + * Reading the result code while holding sem_lock() needs no further barriers, + * the locks inside sem_lock() enforce ordering (case b above) + * + * 3) current->state: + * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock(). + * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may + * happen immediately after calling wake_q_add. As wake_q_add_safe() is called + * when holding sem_lock(), no further barriers are required. + * + * See also ipc/mqueue.c for more details on the covered races. */ #define sc_semmsl sem_ctls[0] @@ -185,10 +261,11 @@ void sem_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &sem_ids(ns), freeary); idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); + rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht); } #endif -void __init sem_init (void) +void __init sem_init(void) { sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", @@ -217,7 +294,7 @@ static void unmerge_queues(struct sem_array *sma) */ list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { struct sem *curr; - curr = &sma->sem_base[q->sops[0].sem_num]; + curr = &sma->sems[q->sops[0].sem_num]; list_add_tail(&q->list, &curr->pending_alter); } @@ -225,7 +302,7 @@ static void unmerge_queues(struct sem_array *sma) } /** - * merge_queues - Merge single semop queues into global queue + * merge_queues - merge single semop queues into global queue * @sma: semaphore array * * This function merges all per-semaphore queues into the global queue. 
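The SEM_BARRIER_1 pairing described in the comment above is easier to see outside the kernel. The following is a minimal userspace model, not the kernel code: C11 atomics stand in for smp_store_release()/smp_load_acquire(), a pthread spinlock stands in for one sems[i].lock, and the function names (leave_complex_mode, try_fast_path) are invented for illustration.

	#include <pthread.h>
	#include <stdatomic.h>

	static atomic_int use_global_lock;	/* models sma->use_global_lock */
	static pthread_spinlock_t per_sem_lock;	/* models one sems[i].lock */

	/* Leaving complex mode: the RELEASE guarantees that every update made
	 * under the global lock is visible before the flag reads as zero. */
	static void leave_complex_mode(void)
	{
		atomic_store_explicit(&use_global_lock, 0, memory_order_release);
	}

	/* Simple-op fast path: re-check the flag with an ACQUIRE *after*
	 * taking the per-semaphore lock; this pairs with the release above. */
	static int try_fast_path(void)
	{
		if (atomic_load_explicit(&use_global_lock, memory_order_relaxed))
			return 0;	/* complex mode: caller takes global lock */
		pthread_spin_lock(&per_sem_lock);
		if (atomic_load_explicit(&use_global_lock, memory_order_acquire)) {
			pthread_spin_unlock(&per_sem_lock);
			return 0;	/* lost a race with complexmode_enter() */
		}
		return 1;		/* per-semaphore lock held */
	}

The re-check under the per-semaphore lock is what makes the unlocked first read safe to keep as a plain relaxed load: it is only an optimization.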
@@ -237,122 +314,170 @@ static void merge_queues(struct sem_array *sma) { int i; for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_splice_init(&sem->pending_alter, &sma->pending_alter); } } +static void sem_rcu_free(struct rcu_head *head) +{ + struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); + struct sem_array *sma = container_of(p, struct sem_array, sem_perm); + + security_sem_free(&sma->sem_perm); + kvfree(sma); +} + +/* + * Enter the mode suitable for non-simple operations: + * Caller must own sem_perm.lock. + */ +static void complexmode_enter(struct sem_array *sma) +{ + int i; + struct sem *sem; + + if (sma->use_global_lock > 0) { + /* + * We are already in global lock mode. + * Nothing to do, just reset the + * counter until we return to simple mode. + */ + WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS); + return; + } + WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS); + + for (i = 0; i < sma->sem_nsems; i++) { + sem = &sma->sems[i]; + spin_lock(&sem->lock); + spin_unlock(&sem->lock); + } +} + +/* + * Try to leave the mode that disallows simple operations: + * Caller must own sem_perm.lock. + */ +static void complexmode_tryleave(struct sem_array *sma) +{ + if (sma->complex_count) { + /* Complex ops are sleeping. + * We must stay in complex mode + */ + return; + } + if (sma->use_global_lock == 1) { + + /* See SEM_BARRIER_1 for purpose/pairing */ + smp_store_release(&sma->use_global_lock, 0); + } else { + WRITE_ONCE(sma->use_global_lock, + sma->use_global_lock-1); + } +} + +#define SEM_GLOBAL_LOCK (-1) /* * If the request contains only one semaphore operation, and there are * no complex transactions pending, lock only the semaphore involved. * Otherwise, lock the entire semaphore array, since we either have * multiple semaphores in our own semops, or we need to look at * semaphores from other pending complex operations. - * - * Carefully guard against sma->complex_count changing between zero - * and non-zero while we are spinning for the lock. The value of - * sma->complex_count cannot change while we are holding the lock, - * so sem_unlock should be fine. - * - * The global lock path checks that all the local locks have been released, - * checking each local lock once. This means that the local lock paths - * cannot start their critical sections while the global lock is held. */ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, int nsops) { - int locknum; - again: - if (nsops == 1 && !sma->complex_count) { - struct sem *sem = sma->sem_base + sops->sem_num; + struct sem *sem; + int idx; - /* Lock just the semaphore we are interested in. */ - spin_lock(&sem->lock); + if (nsops != 1) { + /* Complex operation - acquire a full lock */ + ipc_lock_object(&sma->sem_perm); + /* Prevent parallel simple ops */ + complexmode_enter(sma); + return SEM_GLOBAL_LOCK; + } + + /* + * Only one semaphore affected - try to optimize locking. + * Optimized locking is possible if no complex operation + * is either enqueued or processed right now. + * + * Both facts are tracked by use_global_mode. + */ + idx = array_index_nospec(sops->sem_num, sma->sem_nsems); + sem = &sma->sems[idx]; + + /* + * Initial check for use_global_lock. Just an optimization, + * no locking, no memory barrier. + */ + if (!READ_ONCE(sma->use_global_lock)) { /* - * If sma->complex_count was set while we were spinning, - * we may need to look at things we did not lock here. 
+ * It appears that no complex operation is around. + * Acquire the per-semaphore lock. */ - if (unlikely(sma->complex_count)) { - spin_unlock(&sem->lock); - goto lock_array; + spin_lock(&sem->lock); + + /* see SEM_BARRIER_1 for purpose/pairing */ + if (!smp_load_acquire(&sma->use_global_lock)) { + /* fast path successful! */ + return sops->sem_num; } + spin_unlock(&sem->lock); + } + + /* slow path: acquire the full lock */ + ipc_lock_object(&sma->sem_perm); + if (sma->use_global_lock == 0) { /* - * Another process is holding the global lock on the - * sem_array; we cannot enter our critical section, - * but have to wait for the global lock to be released. + * The use_global_lock mode ended while we waited for + * sma->sem_perm.lock. Thus we must switch to locking + * with sem->lock. + * Unlike in the fast path, there is no need to recheck + * sma->use_global_lock after we have acquired sem->lock: + * We own sma->sem_perm.lock, thus use_global_lock cannot + * change. */ - if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { - spin_unlock(&sem->lock); - spin_unlock_wait(&sma->sem_perm.lock); - goto again; - } + spin_lock(&sem->lock); - locknum = sops->sem_num; + ipc_unlock_object(&sma->sem_perm); + return sops->sem_num; } else { - int i; /* - * Lock the semaphore array, and wait for all of the - * individual semaphore locks to go away. The code - * above ensures no new single-lock holders will enter - * their critical section while the array lock is held. + * Not a false alarm, thus continue to use the global lock + * mode. No need for complexmode_enter(), this was done by + * the caller that has set use_global_mode to non-zero. */ - lock_array: - ipc_lock_object(&sma->sem_perm); - for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; - spin_unlock_wait(&sem->lock); - } - locknum = -1; + return SEM_GLOBAL_LOCK; } - return locknum; } static inline void sem_unlock(struct sem_array *sma, int locknum) { - if (locknum == -1) { + if (locknum == SEM_GLOBAL_LOCK) { unmerge_queues(sma); + complexmode_tryleave(sma); ipc_unlock_object(&sma->sem_perm); } else { - struct sem *sem = sma->sem_base + locknum; + struct sem *sem = &sma->sems[locknum]; spin_unlock(&sem->lock); } } /* - * sem_lock_(check_) routines are called in the paths where the rw_mutex + * sem_lock_(check_) routines are called in the paths where the rwsem * is not held. * * The caller holds the RCU read lock. 
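complexmode_enter()/complexmode_tryleave() above implement the hysteresis that USE_GLOBAL_LOCK_HYSTERESIS configures: the array drops back to per-semaphore locking only after ten simple operations ran with no complex operation pending, so alternating workloads do not bounce between the two modes. A stripped-down model of just the counter logic (hypothetical names, not the kernel functions):

	#define HYSTERESIS 10			/* models USE_GLOBAL_LOCK_HYSTERESIS */

	struct array_model {
		int complex_count;		/* sleeping complex operations */
		int use_global_lock;		/* >0: global lock required */
	};

	/* Entering complex mode (re)arms the countdown, so a burst of
	 * complex ops keeps the array parked in global-lock mode. */
	static void model_complexmode_enter(struct array_model *a)
	{
		a->use_global_lock = HYSTERESIS;
	}

	static void model_complexmode_tryleave(struct array_model *a)
	{
		if (a->complex_count)
			return;			/* complex ops still sleeping */
		if (a->use_global_lock == 1)
			a->use_global_lock = 0;	/* kernel: smp_store_release() */
		else
			a->use_global_lock--;	/* one step closer to simple mode */
	}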
*/ -static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, - int id, struct sembuf *sops, int nsops, int *locknum) -{ - struct kern_ipc_perm *ipcp; - struct sem_array *sma; - - ipcp = ipc_obtain_object(&sem_ids(ns), id); - if (IS_ERR(ipcp)) - return ERR_CAST(ipcp); - - sma = container_of(ipcp, struct sem_array, sem_perm); - *locknum = sem_lock(sma, sops, nsops); - - /* ipc_rmid() may have already freed the ID while sem_lock - * was spinning: verify that the structure is still valid - */ - if (!ipcp->deleted) - return container_of(ipcp, struct sem_array, sem_perm); - - sem_unlock(sma, *locknum); - return ERR_PTR(-EINVAL); -} - static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -374,12 +499,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns static inline void sem_lock_and_putref(struct sem_array *sma) { sem_lock(sma, NULL, -1); - ipc_rcu_putref(sma); -} - -static inline void sem_putref(struct sem_array *sma) -{ - ipc_rcu_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) @@ -387,54 +507,31 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ipc_rmid(&sem_ids(ns), &s->sem_perm); } -/* - * Lockless wakeup algorithm: - * Without the check/retry algorithm a lockless wakeup is possible: - * - queue.status is initialized to -EINTR before blocking. - * - wakeup is performed by - * * unlinking the queue entry from the pending list - * * setting queue.status to IN_WAKEUP - * This is the notification for the blocked thread that a - * result value is imminent. - * * call wake_up_process - * * set queue.status to the final value. - * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the sem array. - * * otherwise it must acquire the spinlock and check what's up. - * - * The two-stage algorithm is necessary to protect against the following - * races: - * - if queue.status is set after wake_up_process, then the woken up idle - * thread could race forward and try (and fail) to acquire sma->lock - * before update_queue had a chance to set queue.status - * - if queue.status is written before wake_up_process and if the - * blocked process is woken up by a signal between writing - * queue.status and the wake_up_process, then the woken up - * process could return from semtimedop and die by calling - * sys_exit before wake_up_process is called. Then wake_up_process - * will oops, because the task structure is already invalid. - * (yes, this happened on s390 with sysv msg). 
- * - */ -#define IN_WAKEUP 1 +static struct sem_array *sem_alloc(size_t nsems) +{ + struct sem_array *sma; + + if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) + return NULL; + + sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); + if (unlikely(!sma)) + return NULL; + + return sma; +} /** * newary - Create a new semaphore set * @ns: namespace * @params: ptr to the structure that contains key, semflg and nsems * - * Called with sem_ids.rw_mutex held (as a writer) + * Called with sem_ids.rwsem held (as a writer) */ - static int newary(struct ipc_namespace *ns, struct ipc_params *params) { - int id; int retval; struct sem_array *sma; - int size; key_t key = params->key; int nsems = params->u.nsems; int semflg = params->flg; @@ -445,45 +542,42 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof (*sma) + nsems * sizeof (struct sem); - sma = ipc_rcu_alloc(size); - if (!sma) { + sma = sem_alloc(nsems); + if (!sma) return -ENOMEM; - } - memset (sma, 0, size); sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; sma->sem_perm.security = NULL; - retval = security_sem_alloc(sma); + retval = security_sem_alloc(&sma->sem_perm); if (retval) { - ipc_rcu_putref(sma); + kvfree(sma); return retval; } - id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); - if (id < 0) { - security_sem_free(sma); - ipc_rcu_putref(sma); - return id; - } - ns->used_sems += nsems; - - sma->sem_base = (struct sem *) &sma[1]; - for (i = 0; i < nsems; i++) { - INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); - INIT_LIST_HEAD(&sma->sem_base[i].pending_const); - spin_lock_init(&sma->sem_base[i].lock); + INIT_LIST_HEAD(&sma->sems[i].pending_alter); + INIT_LIST_HEAD(&sma->sems[i].pending_const); + spin_lock_init(&sma->sems[i].lock); } sma->complex_count = 0; + sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; INIT_LIST_HEAD(&sma->pending_alter); INIT_LIST_HEAD(&sma->pending_const); INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; - sma->sem_ctime = get_seconds(); + sma->sem_ctime = ktime_get_real_seconds(); + + /* ipc_addid() locks sma upon success. */ + retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); + if (retval < 0) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); + return retval; + } + ns->used_sems += nsems; + sem_unlock(sma, -1); rcu_read_unlock(); @@ -492,21 +586,9 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) /* - * Called with sem_ids.rw_mutex and ipcp locked. - */ -static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) -{ - struct sem_array *sma; - - sma = container_of(ipcp, struct sem_array, sem_perm); - return security_sem_associate(sma, semflg); -} - -/* - * Called with sem_ids.rw_mutex and ipcp locked. + * Called with sem_ids.rwsem and ipcp locked. 
*/ -static inline int sem_more_checks(struct kern_ipc_perm *ipcp, - struct ipc_params *params) +static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct sem_array *sma; @@ -517,10 +599,14 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, return 0; } -SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) +long ksys_semget(key_t key, int nsems, int semflg) { struct ipc_namespace *ns; - struct ipc_ops sem_ops; + static const struct ipc_ops sem_ops = { + .getnew = newary, + .associate = security_sem_associate, + .more_checks = sem_more_checks, + }; struct ipc_params sem_params; ns = current->nsproxy->ipc_ns; @@ -528,10 +614,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) if (nsems < 0 || nsems > ns->sc_semmsl) return -EINVAL; - sem_ops.getnew = newary; - sem_ops.associate = sem_security; - sem_ops.more_checks = sem_more_checks; - sem_params.key = key; sem_params.flg = semflg; sem_params.u.nsems = nsems; @@ -539,30 +621,47 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/** perform_atomic_semop - Perform (if possible) a semaphore operation +SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) +{ + return ksys_semget(key, nsems, semflg); +} + +/** + * perform_atomic_semop[_slow] - Attempt to perform semaphore + * operations on a given array. * @sma: semaphore array - * @sops: array with operations that should be checked - * @nsems: number of sops - * @un: undo array - * @pid: pid that did the change + * @q: struct sem_queue that describes the operation + * + * Caller blocking are as follows, based the value + * indicated by the semaphore operation (sem_op): + * + * (1) >0 never blocks. + * (2) 0 (wait-for-zero operation): semval is non-zero. + * (3) <0 attempting to decrement semval to a value smaller than zero. * * Returns 0 if the operation was possible. * Returns 1 if the operation is impossible, the caller must sleep. - * Negative values are error codes. + * Returns <0 for error codes. */ - -static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, - int nsops, struct sem_undo *un, int pid) +static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) { - int result, sem_op; + int result, sem_op, nsops; + struct pid *pid; struct sembuf *sop; - struct sem * curr; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; for (sop = sops; sop < sops + nsops; sop++) { - curr = sma->sem_base + sop->sem_num; + int idx = array_index_nospec(sop->sem_num, sma->sem_nsems); + curr = &sma->sems[idx]; sem_op = sop->sem_op; result = curr->semval; - + if (!sem_op && result) goto would_block; @@ -571,25 +670,25 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops, goto would_block; if (result > SEMVMX) goto out_of_range; + if (sop->sem_flg & SEM_UNDO) { int undo = un->semadj[sop->sem_num] - sem_op; - /* - * Exceeding the undo range is an error. - */ + /* Exceeding the undo range is an error. 
*/ if (undo < (-SEMAEM - 1) || undo > SEMAEM) goto out_of_range; + un->semadj[sop->sem_num] = undo; } + curr->semval = result; } sop--; + pid = q->pid; while (sop >= sops) { - sma->sem_base[sop->sem_num].sempid = pid; - if (sop->sem_flg & SEM_UNDO) - un->semadj[sop->sem_num] -= sop->sem_op; + ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid); sop--; } - + return 0; out_of_range: @@ -597,6 +696,8 @@ out_of_range: goto undo; would_block: + q->blocking = sop; + if (sop->sem_flg & IPC_NOWAIT) result = -EAGAIN; else @@ -605,58 +706,94 @@ would_block: undo: sop--; while (sop >= sops) { - sma->sem_base[sop->sem_num].semval -= sop->sem_op; + sem_op = sop->sem_op; + sma->sems[sop->sem_num].semval -= sem_op; + if (sop->sem_flg & SEM_UNDO) + un->semadj[sop->sem_num] += sem_op; sop--; } return result; } -/** wake_up_sem_queue_prepare(q, error): Prepare wake-up - * @q: queue entry that must be signaled - * @error: Error value for the signal - * - * Prepare the wake-up of the queue entry q. - */ -static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) +static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) { - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the - * wakee busy-wait until we're scheduled back on. - */ - preempt_disable(); + int result, sem_op, nsops; + struct sembuf *sop; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; + + if (unlikely(q->dupsop)) + return perform_atomic_semop_slow(sma, q); + + /* + * We scan the semaphore set twice, first to ensure that the entire + * operation can succeed, therefore avoiding any pointless writes + * to shared memory and having to undo such changes in order to block + * until the operations can go through. + */ + for (sop = sops; sop < sops + nsops; sop++) { + int idx = array_index_nospec(sop->sem_num, sma->sem_nsems); + + curr = &sma->sems[idx]; + sem_op = sop->sem_op; + result = curr->semval; + + if (!sem_op && result) + goto would_block; /* wait-for-zero */ + + result += sem_op; + if (result < 0) + goto would_block; + + if (result > SEMVMX) + return -ERANGE; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + /* Exceeding the undo range is an error. */ + if (undo < (-SEMAEM - 1) || undo > SEMAEM) + return -ERANGE; + } } - q->status = IN_WAKEUP; - q->pid = error; - list_add_tail(&q->list, pt); + for (sop = sops; sop < sops + nsops; sop++) { + curr = &sma->sems[sop->sem_num]; + sem_op = sop->sem_op; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + un->semadj[sop->sem_num] = undo; + } + curr->semval += sem_op; + ipc_update_pid(&curr->sempid, q->pid); + } + + return 0; + +would_block: + q->blocking = sop; + return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1; } -/** - * wake_up_sem_queue_do(pt) - do the actual wake-up - * @pt: list of tasks to be woken up - * - * Do the actual wake-up. - * The function is called without any locks held, thus the semaphore array - * could be destroyed already and the tasks can disappear as soon as the - * status is set to the actual return code. 
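perform_atomic_semop() above avoids the rollback loop of the _slow variant by scanning the operations twice: first validate everything without touching shared state, then apply knowing nothing can fail. A self-contained sketch of that check-then-apply shape (hypothetical helper, not the kernel function):

	#include <stdbool.h>

	#define VMAX 32767		/* stands in for SEMVMX */

	static bool apply_ops(int *vals, const int *nums, const int *deltas, int n)
	{
		/* Phase 1: verify every op succeeds; touch nothing shared. */
		for (int i = 0; i < n; i++) {
			int r = vals[nums[i]] + deltas[i];

			if (r < 0 || r > VMAX)
				return false;	/* would block / out of range */
		}
		/* Phase 2: all ops are known good, apply without undo logic.
		 * Only valid when no index repeats (the kernel's !dupsop case);
		 * with duplicates, phase 1 would miss interactions between
		 * the repeated operations, hence the _slow fallback. */
		for (int i = 0; i < n; i++)
			vals[nums[i]] += deltas[i];
		return true;
	}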
- */ -static void wake_up_sem_queue_do(struct list_head *pt) +static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, + struct wake_q_head *wake_q) { - struct sem_queue *q, *t; - int did_something; + struct task_struct *sleeper; - did_something = !list_empty(pt); - list_for_each_entry_safe(q, t, pt, list) { - wake_up_process(q->sleeper); - /* q can disappear immediately after writing q->status. */ - smp_wmb(); - q->status = q->pid; - } - if (did_something) - preempt_enable(); + sleeper = get_task_struct(q->sleeper); + + /* see SEM_BARRIER_2 for purpose/pairing */ + smp_store_release(&q->status, error); + + wake_q_add_safe(wake_q, sleeper); } static void unlink_queue(struct sem_array *sma, struct sem_queue *q) @@ -676,7 +813,7 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q) * modified the array. * Note that wait-for-zero operations are handled without restart. */ -static int check_restart(struct sem_array *sma, struct sem_queue *q) +static inline int check_restart(struct sem_array *sma, struct sem_queue *q) { /* pending complex alter operations are too difficult to analyse */ if (!list_empty(&sma->pending_alter)) @@ -688,7 +825,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) /* It is impossible that someone waits for the new value: * - complex operations always restart. - * - wait-for-zero are handled seperately. + * - wait-for-zero are handled separately. * - q is a previously sleeping simple operation that * altered the array. It must be a decrement, because * simple increments never sleep. @@ -701,69 +838,60 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) } /** - * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks + * wake_const_ops - wake up non-alter tasks * @sma: semaphore array. * @semnum: semaphore that was modified. - * @pt: list head for the tasks that must be woken up. + * @wake_q: lockless wake-queue head. * * wake_const_ops must be called after a semaphore in a semaphore array * was set to 0. If complex const operations are pending, wake_const_ops must * be called with semnum = -1, as well as with the number of each modified * semaphore. - * The tasks that must be woken up are added to @pt. The return code + * The tasks that must be woken up are added to @wake_q. The return code * is stored in q->pid. * The function returns 1 if at least one operation was completed successfully. 
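wake_up_sem_queue_prepare() above only records the result and queues the sleeper on a wake_q; the actual wakeup happens via wake_up_q() once every lock has been dropped. A userspace model of that "publish result under the lock, wake afterwards" pattern using pthreads (all names hypothetical; the real code additionally pins the task with get_task_struct() so it cannot vanish before the deferred wakeup):

	#include <pthread.h>

	struct waiter {
		struct waiter *next;	/* links on the local wake queue */
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int status;		/* models q->status, -EINTR while asleep */
	};

	/* Called with the array lock held: record result, defer the wake. */
	static void prepare_wake(struct waiter *w, int error,
				 struct waiter **wake_q)
	{
		w->status = error;	/* kernel: smp_store_release() */
		w->next = *wake_q;
		*wake_q = w;
	}

	/* Called after all array locks are dropped: perform the wakeups. */
	static void wake_all(struct waiter *wake_q)
	{
		while (wake_q) {
			struct waiter *w = wake_q;

			wake_q = w->next;
			pthread_mutex_lock(&w->lock);
			pthread_cond_signal(&w->cond);
			pthread_mutex_unlock(&w->lock);
		}
	}

Deferring the signal keeps the wakeup cost out of the critical section and means the woken task never contends for a lock the waker still holds.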
*/ static int wake_const_ops(struct sem_array *sma, int semnum, - struct list_head *pt) + struct wake_q_head *wake_q) { - struct sem_queue *q; - struct list_head *walk; + struct sem_queue *q, *tmp; struct list_head *pending_list; int semop_completed = 0; if (semnum == -1) pending_list = &sma->pending_const; else - pending_list = &sma->sem_base[semnum].pending_const; - - walk = pending_list->next; - while (walk != pending_list) { - int error; + pending_list = &sma->sems[semnum].pending_const; - q = container_of(walk, struct sem_queue, list); - walk = walk->next; + list_for_each_entry_safe(q, tmp, pending_list, list) { + int error = perform_atomic_semop(sma, q); - error = perform_atomic_semop(sma, q->sops, q->nsops, - q->undo, q->pid); - - if (error <= 0) { - /* operation completed, remove from queue & wakeup */ - - unlink_queue(sma, q); + if (error > 0) + continue; + /* operation completed, remove from queue & wakeup */ + unlink_queue(sma, q); - wake_up_sem_queue_prepare(pt, q, error); - if (error == 0) - semop_completed = 1; - } + wake_up_sem_queue_prepare(q, error, wake_q); + if (error == 0) + semop_completed = 1; } + return semop_completed; } /** - * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks + * do_smart_wakeup_zero - wakeup all wait for zero tasks * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations - * @pt: list head of the tasks that must be woken up. + * @wake_q: lockless wake-queue head * - * do_smart_wakeup_zero() checks all required queue for wait-for-zero - * operations, based on the actual changes that were performed on the - * semaphore array. + * Checks all required queue for wait-for-zero operations, based + * on the actual changes that were performed on the semaphore array. * The function returns 1 if at least one operation was completed successfully. */ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, - int nsops, struct list_head *pt) + int nsops, struct wake_q_head *wake_q) { int i; int semop_completed = 0; @@ -774,9 +902,9 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, for (i = 0; i < nsops; i++) { int num = sops[i].sem_num; - if (sma->sem_base[num].semval == 0) { + if (sma->sems[num].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, num, pt); + semop_completed |= wake_const_ops(sma, num, wake_q); } } } else { @@ -785,9 +913,9 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * Assume all were changed. */ for (i = 0; i < sma->sem_nsems; i++) { - if (sma->sem_base[i].semval == 0) { + if (sma->sems[i].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, i, pt); + semop_completed |= wake_const_ops(sma, i, wake_q); } } } @@ -796,48 +924,43 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * then check the global queue, too. */ if (got_zero) - semop_completed |= wake_const_ops(sma, -1, pt); + semop_completed |= wake_const_ops(sma, -1, wake_q); return semop_completed; } /** - * update_queue(sma, semnum): Look for tasks that can be completed. + * update_queue - look for tasks that can be completed. * @sma: semaphore array. * @semnum: semaphore that was modified. - * @pt: list head for the tasks that must be woken up. + * @wake_q: lockless wake-queue head. * * update_queue must be called after a semaphore in a semaphore array * was modified. 
If multiple semaphores were modified, update_queue must * be called with semnum = -1, as well as with the number of each modified * semaphore. - * The tasks that must be woken up are added to @pt. The return code + * The tasks that must be woken up are added to @wake_q. The return code * is stored in q->pid. * The function internally checks if const operations can now succeed. * * The function return 1 if at least one semop was completed successfully. */ -static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) +static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q) { - struct sem_queue *q; - struct list_head *walk; + struct sem_queue *q, *tmp; struct list_head *pending_list; int semop_completed = 0; if (semnum == -1) pending_list = &sma->pending_alter; else - pending_list = &sma->sem_base[semnum].pending_alter; + pending_list = &sma->sems[semnum].pending_alter; again: - walk = pending_list->next; - while (walk != pending_list) { + list_for_each_entry_safe(q, tmp, pending_list, list) { int error, restart; - q = container_of(walk, struct sem_queue, list); - walk = walk->next; - /* If we are scanning the single sop, per-semaphore list of * one semaphore and that semaphore is 0, then it is not * necessary to scan further: simple increments @@ -845,11 +968,10 @@ again: * be in the per semaphore pending queue, and decrements * cannot be successful if the value is already 0. */ - if (semnum != -1 && sma->sem_base[semnum].semval == 0) + if (semnum != -1 && sma->sems[semnum].semval == 0) break; - error = perform_atomic_semop(sma, q->sops, q->nsops, - q->undo, q->pid); + error = perform_atomic_semop(sma, q); /* Does q->sleeper still need to sleep? */ if (error > 0) @@ -861,11 +983,11 @@ again: restart = 0; } else { semop_completed = 1; - do_smart_wakeup_zero(sma, q->sops, q->nsops, pt); + do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q); restart = check_restart(sma, q); } - wake_up_sem_queue_prepare(pt, q, error); + wake_up_sem_queue_prepare(q, error, wake_q); if (restart) goto again; } @@ -873,29 +995,47 @@ again: } /** - * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue + * set_semotime - set sem_otime + * @sma: semaphore array + * @sops: operations that modified the array, may be NULL + * + * sem_otime is replicated to avoid cache line trashing. + * This function sets one instance to the current time. + */ +static void set_semotime(struct sem_array *sma, struct sembuf *sops) +{ + if (sops == NULL) { + sma->sems[0].sem_otime = ktime_get_real_seconds(); + } else { + sma->sems[sops[0].sem_num].sem_otime = + ktime_get_real_seconds(); + } +} + +/** + * do_smart_update - optimized update_queue * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations * @otime: force setting otime - * @pt: list head of the tasks that must be woken up. + * @wake_q: lockless wake-queue head * * do_smart_update() does the required calls to update_queue and wakeup_zero, * based on the actual changes that were performed on the semaphore array. * Note that the function does not do the actual wake-up: the caller is - * responsible for calling wake_up_sem_queue_do(@pt). + * responsible for calling wake_up_q(). * It is safe to perform this call after dropping all locks. 
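set_semotime() above updates only one per-semaphore sem_otime replica; the reader side (get_semotime(), further down in this diff) folds the replicas back together by taking the maximum. A compact model of the pair, with hypothetical names and time(NULL) standing in for ktime_get_real_seconds():

	#include <time.h>

	/* One otime replica per semaphore keeps concurrent simple ops from
	 * all writing the same cache line. */
	struct sem_model {
		long long otime;
	};

	static void model_set_otime(struct sem_model *sems, int which)
	{
		sems[which].otime = (long long)time(NULL);	/* one replica */
	}

	static long long model_get_otime(const struct sem_model *sems, int nsems)
	{
		long long res = sems[0].otime;

		for (int i = 1; i < nsems; i++)		/* max over replicas */
			if (sems[i].otime > res)
				res = sems[i].otime;
		return res;
	}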
*/ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, - int otime, struct list_head *pt) + int otime, struct wake_q_head *wake_q) { int i; - otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); + otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q); if (!list_empty(&sma->pending_alter)) { /* semaphore array uses the global queue - just process it. */ - otime |= update_queue(sma, -1, pt); + otime |= update_queue(sma, -1, wake_q); } else { if (!sops) { /* @@ -903,99 +1043,101 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop * known. Check all. */ for (i = 0; i < sma->sem_nsems; i++) - otime |= update_queue(sma, i, pt); + otime |= update_queue(sma, i, wake_q); } else { /* * Check the semaphores that were increased: * - No complex ops, thus all sleeping ops are * decrease. * - if we decreased the value, then any sleeping - * semaphore ops wont be able to run: If the + * semaphore ops won't be able to run: If the * previous value was too small, then the new * value will be too small, too. */ for (i = 0; i < nsops; i++) { if (sops[i].sem_op > 0) { otime |= update_queue(sma, - sops[i].sem_num, pt); + sops[i].sem_num, wake_q); } } } } - if (otime) { - if (sops == NULL) { - sma->sem_base[0].sem_otime = get_seconds(); - } else { - sma->sem_base[sops[0].sem_num].sem_otime = - get_seconds(); - } - } + if (otime) + set_semotime(sma, sops); } +/* + * check_qop: Test if a queued operation sleeps on the semaphore semnum + */ +static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, + bool count_zero) +{ + struct sembuf *sop = q->blocking; + + /* + * Linux always (since 0.99.10) reported a task as sleeping on all + * semaphores. This violates SUS, therefore it was changed to the + * standard compliant behavior. + * Give the administrators a chance to notice that an application + * might misbehave because it relies on the Linux behavior. + */ + pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n" + "The task %s (%d) triggered the difference, watch for misbehavior.\n", + current->comm, task_pid_nr(current)); + + if (sop->sem_num != semnum) + return 0; + + if (count_zero && sop->sem_op == 0) + return 1; + if (!count_zero && sop->sem_op < 0) + return 1; + + return 0; +} /* The following counts are associated to each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero - * This model assumes that a task waits on exactly one semaphore. - * Since semaphore operations are to be performed atomically, tasks actually - * wait on a whole sequence of semaphores simultaneously. - * The counts we return here are a rough approximation, but still - * warrant that semncnt+semzcnt>0 if the task is on the pending queue. + * + * Per definition, a task waits only on the semaphore of the first semop + * that cannot proceed, even if additional operation would block, too. */ -static int count_semncnt (struct sem_array * sma, ushort semnum) +static int count_semcnt(struct sem_array *sma, ushort semnum, + bool count_zero) { - int semncnt; - struct sem_queue * q; + struct list_head *l; + struct sem_queue *q; + int semcnt; - semncnt = 0; - list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { - struct sembuf * sops = q->sops; - BUG_ON(sops->sem_num != semnum); - if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) - semncnt++; - } + semcnt = 0; + /* First: check the simple operations. 
They are easy to evaluate */ + if (count_zero) + l = &sma->sems[semnum].pending_const; + else + l = &sma->sems[semnum].pending_alter; - list_for_each_entry(q, &sma->pending_alter, list) { - struct sembuf * sops = q->sops; - int nsops = q->nsops; - int i; - for (i = 0; i < nsops; i++) - if (sops[i].sem_num == semnum - && (sops[i].sem_op < 0) - && !(sops[i].sem_flg & IPC_NOWAIT)) - semncnt++; + list_for_each_entry(q, l, list) { + /* all task on a per-semaphore list sleep on exactly + * that semaphore + */ + semcnt++; } - return semncnt; -} - -static int count_semzcnt (struct sem_array * sma, ushort semnum) -{ - int semzcnt; - struct sem_queue * q; - semzcnt = 0; - list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) { - struct sembuf * sops = q->sops; - BUG_ON(sops->sem_num != semnum); - if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) - semzcnt++; + /* Then: check the complex operations. */ + list_for_each_entry(q, &sma->pending_alter, list) { + semcnt += check_qop(sma, semnum, q, count_zero); } - - list_for_each_entry(q, &sma->pending_const, list) { - struct sembuf * sops = q->sops; - int nsops = q->nsops; - int i; - for (i = 0; i < nsops; i++) - if (sops[i].sem_num == semnum - && (sops[i].sem_op == 0) - && !(sops[i].sem_flg & IPC_NOWAIT)) - semzcnt++; + if (count_zero) { + list_for_each_entry(q, &sma->pending_const, list) { + semcnt += check_qop(sma, semnum, q, count_zero); + } } - return semzcnt; + return semcnt; } -/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked - * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex +/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked + * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem * remains locked on exit. */ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) @@ -1003,8 +1145,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) struct sem_undo *un, *tu; struct sem_queue *q, *tq; struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); - struct list_head tasks; int i; + DEFINE_WAKE_Q(wake_q); /* Free the existing undo structures for this semaphore set. */ ipc_assert_locked_object(&sma->sem_perm); @@ -1014,30 +1156,30 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) un->semid = -1; list_del_rcu(&un->list_proc); spin_unlock(&un->ulp->lock); - kfree_rcu(un, rcu); + kvfree_rcu(un, rcu); } /* Wake up all pending processes and let them fail with EIDRM. 
*/ - INIT_LIST_HEAD(&tasks); list_for_each_entry_safe(q, tq, &sma->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_for_each_entry_safe(q, tq, &sem->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } + ipc_update_pid(&sem->sempid, NULL); } /* Remove the semaphore set from the IDR */ @@ -1045,15 +1187,14 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); ns->used_sems -= sma->sem_nsems; - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -1075,14 +1216,14 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } -static time_t get_semotime(struct sem_array *sma) +static time64_t get_semotime(struct sem_array *sma) { int i; - time_t res; + time64_t res; - res = sma->sem_base[0].sem_otime; + res = sma->sems[0].sem_otime; for (i = 1; i < sma->sem_nsems; i++) { - time_t to = sma->sem_base[i].sem_otime; + time64_t to = sma->sems[i].sem_otime; if (to > res) res = to; @@ -1090,117 +1231,127 @@ static time_t get_semotime(struct sem_array *sma) return res; } -static int semctl_nolock(struct ipc_namespace *ns, int semid, - int cmd, int version, void __user *p) +static int semctl_stat(struct ipc_namespace *ns, int semid, + int cmd, struct semid64_ds *semid64) { - int err; struct sem_array *sma; + time64_t semotime; + int err; - switch(cmd) { - case IPC_INFO: - case SEM_INFO: - { - struct seminfo seminfo; - int max_id; + memset(semid64, 0, sizeof(*semid64)); - err = security_sem_semctl(NULL, cmd); - if (err) - return err; - - memset(&seminfo,0,sizeof(seminfo)); - seminfo.semmni = ns->sc_semmni; - seminfo.semmns = ns->sc_semmns; - seminfo.semmsl = ns->sc_semmsl; - seminfo.semopm = ns->sc_semopm; - seminfo.semvmx = SEMVMX; - seminfo.semmnu = SEMMNU; - seminfo.semmap = SEMMAP; - seminfo.semume = SEMUME; - down_read(&sem_ids(ns).rw_mutex); - if (cmd == SEM_INFO) { - seminfo.semusz = sem_ids(ns).in_use; - seminfo.semaem = ns->used_sems; - } else { - seminfo.semusz = SEMUSZ; - seminfo.semaem = SEMAEM; + rcu_read_lock(); + if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) { + sma = sem_obtain_object(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; } - max_id = ipc_get_maxid(&sem_ids(ns)); - up_read(&sem_ids(ns).rw_mutex); - if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) - return -EFAULT; - return (max_id < 0) ? 
0: max_id; - } - case IPC_STAT: - case SEM_STAT: - { - struct semid64_ds tbuf; - int id = 0; - - memset(&tbuf, 0, sizeof(tbuf)); - - rcu_read_lock(); - if (cmd == SEM_STAT) { - sma = sem_obtain_object(ns, semid); - if (IS_ERR(sma)) { - err = PTR_ERR(sma); - goto out_unlock; - } - id = sma->sem_perm.id; - } else { - sma = sem_obtain_object_check(ns, semid); - if (IS_ERR(sma)) { - err = PTR_ERR(sma); - goto out_unlock; - } + } else { /* IPC_STAT */ + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; } + } + /* see comment for SHM_STAT_ANY */ + if (cmd == SEM_STAT_ANY) + audit_ipc_obj(&sma->sem_perm); + else { err = -EACCES; if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) goto out_unlock; + } - err = security_sem_semctl(sma, cmd); - if (err) - goto out_unlock; + err = security_sem_semctl(&sma->sem_perm, cmd); + if (err) + goto out_unlock; - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = get_semotime(sma); - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - rcu_read_unlock(); - if (copy_semid_to_user(p, &tbuf, version)) - return -EFAULT; - return id; + ipc_lock_object(&sma->sem_perm); + + if (!ipc_valid_object(&sma->sem_perm)) { + ipc_unlock_object(&sma->sem_perm); + err = -EIDRM; + goto out_unlock; } - default: - return -EINVAL; + + kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm); + semotime = get_semotime(sma); + semid64->sem_otime = semotime; + semid64->sem_ctime = sma->sem_ctime; +#ifndef CONFIG_64BIT + semid64->sem_otime_high = semotime >> 32; + semid64->sem_ctime_high = sma->sem_ctime >> 32; +#endif + semid64->sem_nsems = sma->sem_nsems; + + if (cmd == IPC_STAT) { + /* + * As defined in SUS: + * Return 0 on success + */ + err = 0; + } else { + /* + * SEM_STAT and SEM_STAT_ANY (both Linux specific) + * Return the full id, including the sequence number + */ + err = sma->sem_perm.id; } + ipc_unlock_object(&sma->sem_perm); out_unlock: rcu_read_unlock(); return err; } +static int semctl_info(struct ipc_namespace *ns, int semid, + int cmd, void __user *p) +{ + struct seminfo seminfo; + int max_idx; + int err; + + err = security_sem_semctl(NULL, cmd); + if (err) + return err; + + memset(&seminfo, 0, sizeof(seminfo)); + seminfo.semmni = ns->sc_semmni; + seminfo.semmns = ns->sc_semmns; + seminfo.semmsl = ns->sc_semmsl; + seminfo.semopm = ns->sc_semopm; + seminfo.semvmx = SEMVMX; + seminfo.semmnu = SEMMNU; + seminfo.semmap = SEMMAP; + seminfo.semume = SEMUME; + down_read(&sem_ids(ns).rwsem); + if (cmd == SEM_INFO) { + seminfo.semusz = sem_ids(ns).in_use; + seminfo.semaem = ns->used_sems; + } else { + seminfo.semusz = SEMUSZ; + seminfo.semaem = SEMAEM; + } + max_idx = ipc_get_maxidx(&sem_ids(ns)); + up_read(&sem_ids(ns).rwsem); + if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) + return -EFAULT; + return (max_idx < 0) ? 
0 : max_idx; +} + static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, - unsigned long arg) + int val) { struct sem_undo *un; struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err; - struct list_head tasks; - int val; -#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) - /* big-endian 64bit */ - val = arg >> 32; -#else - /* 32bit or little-endian 64bit */ - val = arg; -#endif + DEFINE_WAKE_Q(wake_q); if (val > SEMVMX || val < 0) return -ERANGE; - INIT_LIST_HEAD(&tasks); - rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); if (IS_ERR(sma)) { @@ -1219,7 +1370,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, return -EACCES; } - err = security_sem_semctl(sma, SETVAL); + err = security_sem_semctl(&sma->sem_perm, SETVAL); if (err) { rcu_read_unlock(); return -EACCES; @@ -1227,20 +1378,27 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, sem_lock(sma, NULL, -1); - curr = &sma->sem_base[semnum]; + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + return -EIDRM; + } + + semnum = array_index_nospec(semnum, sma->sem_nsems); + curr = &sma->sems[semnum]; ipc_assert_locked_object(&sma->sem_perm); list_for_each_entry(un, &sma->list_id, list_id) un->semadj[semnum] = 0; curr->semval = val; - curr->sempid = task_tgid_vnr(current); - sma->sem_ctime = get_seconds(); + ipc_update_pid(&curr->sempid, task_tgid(current)); + sma->sem_ctime = ktime_get_real_seconds(); /* maybe some queued-up processes were waiting for this */ - do_smart_update(sma, NULL, 0, 0, &tasks); + do_smart_update(sma, NULL, 0, 0, &wake_q); sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); return 0; } @@ -1248,13 +1406,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int cmd, void __user *p) { struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err, nsems; ushort fast_sem_io[SEMMSL_FAST]; - ushort* sem_io = fast_sem_io; - struct list_head tasks; - - INIT_LIST_HEAD(&tasks); + ushort *sem_io = fast_sem_io; + DEFINE_WAKE_Q(wake_q); rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); @@ -1269,11 +1425,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? 
S_IWUGO : S_IRUGO))
 		goto out_rcu_wakeup;
 
-	err = security_sem_semctl(sma, cmd);
+	err = security_sem_semctl(&sma->sem_perm, cmd);
 	if (err)
 		goto out_rcu_wakeup;
 
-	err = -EACCES;
 	switch (cmd) {
 	case GETALL:
 	{
@@ -1281,36 +1436,37 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int i;
 
 		sem_lock(sma, NULL, -1);
-		if(nsems > SEMMSL_FAST) {
-			if (!ipc_rcu_getref(sma)) {
-				sem_unlock(sma, -1);
-				rcu_read_unlock();
+		if (!ipc_valid_object(&sma->sem_perm)) {
+			err = -EIDRM;
+			goto out_unlock;
+		}
+		if (nsems > SEMMSL_FAST) {
+			if (!ipc_rcu_getref(&sma->sem_perm)) {
 				err = -EIDRM;
-				goto out_free;
+				goto out_unlock;
 			}
 			sem_unlock(sma, -1);
 			rcu_read_unlock();
-			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
-				sem_putref(sma);
+			sem_io = kvmalloc_array(nsems, sizeof(ushort),
+						GFP_KERNEL);
+			if (sem_io == NULL) {
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				return -ENOMEM;
 			}
 
 			rcu_read_lock();
 			sem_lock_and_putref(sma);
-			if (sma->sem_perm.deleted) {
-				sem_unlock(sma, -1);
-				rcu_read_unlock();
+			if (!ipc_valid_object(&sma->sem_perm)) {
 				err = -EIDRM;
-				goto out_free;
+				goto out_unlock;
 			}
 		}
 		for (i = 0; i < sma->sem_nsems; i++)
-			sem_io[i] = sma->sem_base[i].semval;
+			sem_io[i] = sma->sems[i].semval;
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		err = 0;
-		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
+		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 			err = -EFAULT;
 		goto out_free;
 	}
@@ -1319,53 +1475,54 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int i;
 		struct sem_undo *un;
 
-		if (!ipc_rcu_getref(sma)) {
-			rcu_read_unlock();
-			return -EIDRM;
+		if (!ipc_rcu_getref(&sma->sem_perm)) {
+			err = -EIDRM;
+			goto out_rcu_wakeup;
 		}
 		rcu_read_unlock();
 
-		if(nsems > SEMMSL_FAST) {
-			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
-				sem_putref(sma);
+		if (nsems > SEMMSL_FAST) {
+			sem_io = kvmalloc_array(nsems, sizeof(ushort),
+						GFP_KERNEL);
+			if (sem_io == NULL) {
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
-		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-			sem_putref(sma);
+		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
+			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
 			if (sem_io[i] > SEMVMX) {
-				sem_putref(sma);
+				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 				err = -ERANGE;
 				goto out_free;
 			}
 		}
 		rcu_read_lock();
 		sem_lock_and_putref(sma);
-		if (sma->sem_perm.deleted) {
-			sem_unlock(sma, -1);
-			rcu_read_unlock();
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			err = -EIDRM;
-			goto out_free;
+			goto out_unlock;
 		}
 
-		for (i = 0; i < nsems; i++)
-			sma->sem_base[i].semval = sem_io[i];
+		for (i = 0; i < nsems; i++) {
+			sma->sems[i].semval = sem_io[i];
+			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
+		}
 
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_for_each_entry(un, &sma->list_id, list_id) {
 			for (i = 0; i < nsems; i++)
 				un->semadj[i] = 0;
 		}
-		sma->sem_ctime = get_seconds();
+		sma->sem_ctime = ktime_get_real_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		do_smart_update(sma, NULL, 0, 0, &tasks);
+		do_smart_update(sma, NULL, 0, 0, &wake_q);
 		err = 0;
 		goto out_unlock;
 	}
@@ -1376,20 +1533,26 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		goto out_rcu_wakeup;
 
 	sem_lock(sma, NULL, -1);
-	curr = &sma->sem_base[semnum];
+	if (!ipc_valid_object(&sma->sem_perm)) {
+		err = -EIDRM;
+		goto out_unlock;
+	}
+
+	semnum = array_index_nospec(semnum, nsems);
+	curr = &sma->sems[semnum];
 
 	switch (cmd) {
 	case GETVAL:
 		err = curr->semval;
 		goto out_unlock;
 	case GETPID:
-		err = curr->sempid;
+		err = pid_vnr(curr->sempid);
 		goto out_unlock;
 	case GETNCNT:
-		err = count_semncnt(sma,semnum);
+		err = count_semcnt(sma, semnum, 0);
 		goto out_unlock;
 	case GETZCNT:
-		err = count_semzcnt(sma,semnum);
+		err = count_semcnt(sma, semnum, 1);
 		goto out_unlock;
 	}
 
@@ -1397,17 +1560,17 @@ out_unlock:
 	sem_unlock(sma, -1);
 out_rcu_wakeup:
 	rcu_read_unlock();
-	wake_up_sem_queue_do(&tasks);
+	wake_up_q(&wake_q);
 out_free:
-	if(sem_io != fast_sem_io)
-		ipc_free(sem_io, sizeof(ushort)*nsems);
+	if (sem_io != fast_sem_io)
+		kvfree(sem_io);
 	return err;
 }
 
 static inline unsigned long
 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
@@ -1416,7 +1579,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 	{
 		struct semid_ds tbuf_old;
 
-		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
+		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
 		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
@@ -1431,28 +1594,22 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 }
 
 /*
- * This function handles some semctl commands which require the rw_mutex
+ * This function handles some semctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int semctl_down(struct ipc_namespace *ns, int semid,
-		       int cmd, int version, void __user *p)
+		       int cmd, struct semid64_ds *semid64)
 {
 	struct sem_array *sma;
 	int err;
-	struct semid64_ds semid64;
 	struct kern_ipc_perm *ipcp;
 
-	if(cmd == IPC_SET) {
-		if (copy_semid_from_user(&semid64, p, version))
-			return -EFAULT;
-	}
-
-	down_write(&sem_ids(ns).rw_mutex);
+	down_write(&sem_ids(ns).rwsem);
 	rcu_read_lock();
 
-	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
-				      &semid64.sem_perm, 0);
+	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
+				      &semid64->sem_perm, 0);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
 		goto out_unlock1;
@@ -1460,7 +1617,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
 
 	sma = container_of(ipcp, struct sem_array, sem_perm);
 
-	err = security_sem_semctl(sma, cmd);
+	err = security_sem_semctl(&sma->sem_perm, cmd);
 	if (err)
 		goto out_unlock1;
 
@@ -1472,10 +1629,10 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
 		goto out_up;
 	case IPC_SET:
 		sem_lock(sma, NULL, -1);
-		err = ipc_update_perm(&semid64.sem_perm, ipcp);
+		err = ipc_update_perm(&semid64->sem_perm, ipcp);
 		if (err)
 			goto out_unlock0;
-		sma->sem_ctime = get_seconds();
+		sma->sem_ctime = ktime_get_real_seconds();
 		break;
 	default:
 		err = -EINVAL;
@@ -1487,28 +1644,35 @@ out_unlock0:
 out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&sem_ids(ns).rw_mutex);
+	up_write(&sem_ids(ns).rwsem);
 	return err;
 }
 
-SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
 {
-	int version;
 	struct ipc_namespace *ns;
 	void __user *p = (void __user *)arg;
+	struct semid64_ds semid64;
+	int err;
 
 	if (semid < 0)
 		return -EINVAL;
 
-	version = ipc_parse_version(&cmd);
 	ns = current->nsproxy->ipc_ns;
 
-	switch(cmd) {
+	switch (cmd) {
 	case IPC_INFO:
 	case SEM_INFO:
+		return semctl_info(ns, semid, cmd, p);
 	case IPC_STAT:
 	case SEM_STAT:
-		return semctl_nolock(ns, semid, cmd, version, p);
+	case SEM_STAT_ANY:
+		err = semctl_stat(ns, semid, cmd, &semid64);
+		if (err < 0)
+			return err;
+		if (copy_semid_to_user(p, &semid64, version))
+			err = -EFAULT;
+		return err;
 	case GETALL:
 	case GETVAL:
 	case GETPID:
@@ -1516,16 +1680,162 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
 	case GETZCNT:
 	case SETALL:
 		return semctl_main(ns, semid, semnum, cmd, p);
+	case SETVAL: {
+		int val;
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+		/* big-endian 64bit */
+		val = arg >> 32;
+#else
+		/* 32bit or little-endian 64bit */
+		val = arg;
+#endif
+		return semctl_setval(ns, semid, semnum, val);
+	}
+	case IPC_SET:
+		if (copy_semid_from_user(&semid64, p, version))
+			return -EFAULT;
+		fallthrough;
+	case IPC_RMID:
+		return semctl_down(ns, semid, cmd, &semid64);
+	default:
+		return -EINVAL;
+	}
+}
+
+SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
+{
+	int version = ipc_parse_version(&cmd);
+
+	return ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+	return ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
+
+#ifdef CONFIG_COMPAT
+
+struct compat_semid_ds {
+	struct compat_ipc_perm sem_perm;
+	old_time32_t sem_otime;
+	old_time32_t sem_ctime;
+	compat_uptr_t sem_base;
+	compat_uptr_t sem_pending;
+	compat_uptr_t sem_pending_last;
+	compat_uptr_t undo;
+	unsigned short sem_nsems;
+};
+
+static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
+					int version)
+{
+	memset(out, 0, sizeof(*out));
+	if (version == IPC_64) {
+		struct compat_semid64_ds __user *p = buf;
+		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
+	} else {
+		struct compat_semid_ds __user *p = buf;
+		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
+	}
+}
+
+static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
+					int version)
+{
+	if (version == IPC_64) {
+		struct compat_semid64_ds v;
+		memset(&v, 0, sizeof(v));
+		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
+		v.sem_otime	 = lower_32_bits(in->sem_otime);
+		v.sem_otime_high = upper_32_bits(in->sem_otime);
+		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
+		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
+		v.sem_nsems = in->sem_nsems;
+		return copy_to_user(buf, &v, sizeof(v));
+	} else {
+		struct compat_semid_ds v;
+		memset(&v, 0, sizeof(v));
+		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
+		v.sem_otime = in->sem_otime;
+		v.sem_ctime = in->sem_ctime;
+		v.sem_nsems = in->sem_nsems;
+		return copy_to_user(buf, &v, sizeof(v));
+	}
+}
+
+static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
+{
+	void __user *p = compat_ptr(arg);
+	struct ipc_namespace *ns;
+	struct semid64_ds semid64;
+	int err;
+
+	ns = current->nsproxy->ipc_ns;
+
+	if (semid < 0)
+		return -EINVAL;
+
+	switch (cmd & (~IPC_64)) {
+	case IPC_INFO:
+	case SEM_INFO:
+		return semctl_info(ns, semid, cmd, p);
+	case IPC_STAT:
+	case SEM_STAT:
+	case SEM_STAT_ANY:
+		err = semctl_stat(ns, semid, cmd, &semid64);
+		if (err < 0)
+			return err;
+		if (copy_compat_semid_to_user(p, &semid64, version))
+			err = -EFAULT;
+		return err;
+	case GETVAL:
+	case GETPID:
+	case GETNCNT:
+	case GETZCNT:
+	case GETALL:
+	case SETALL:
+		return semctl_main(ns, semid, semnum, cmd, p);
 	case SETVAL:
 		return semctl_setval(ns, semid, semnum, arg);
-	case IPC_RMID:
 	case IPC_SET:
-		return semctl_down(ns, semid, cmd, version, p);
+		if (copy_compat_semid_from_user(&semid64, p, version))
+			return -EFAULT;
+		fallthrough;
+	case IPC_RMID:
+		return semctl_down(ns, semid, cmd, &semid64);
 	default:
 		return -EINVAL;
 	}
 }
+
+COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
+{
+	int version = compat_ipc_parse_version(&cmd);
+
+	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}
+
+COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
+#endif
+
 /* If the task doesn't already have a undo_list, then allocate one
  * here.  We guarantee there is only one thread using this undo list,
  * and current is THE ONE
@@ -1543,11 +1853,11 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 	undo_list = current->sysvsem.undo_list;
 	if (!undo_list) {
-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
 		if (undo_list == NULL)
 			return -ENOMEM;
 		spin_lock_init(&undo_list->lock);
-		atomic_set(&undo_list->refcnt, 1);
+		refcount_set(&undo_list->refcnt, 1);
 		INIT_LIST_HEAD(&undo_list->list_proc);
 
 		current->sysvsem.undo_list = undo_list;
@@ -1560,7 +1870,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
 	struct sem_undo *un;
 
-	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
+				spin_is_locked(&ulp->lock)) {
 		if (un->semid == semid)
 			return un;
 	}
@@ -1571,7 +1882,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
 	struct sem_undo *un;
 
-	assert_spin_locked(&ulp->lock);
+	assert_spin_locked(&ulp->lock);
 
 	un = __lookup_undo(ulp, semid);
 	if (un) {
@@ -1582,7 +1893,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 }
 
 /**
- * find_alloc_undo - Lookup (and if not present create) undo array
+ * find_alloc_undo - lookup (and if not present create) undo array
  * @ns: namespace
 * @semid: semaphore array id
 *
@@ -1607,7 +1918,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	spin_unlock(&ulp->lock);
-	if (likely(un!=NULL))
+	if (likely(un != NULL))
 		goto out;
 
 	/* no undo structure around - allocate one. */
@@ -1619,7 +1930,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	}
 	nsems = sma->sem_nsems;
-	if (!ipc_rcu_getref(sma)) {
+	if (!ipc_rcu_getref(&sma->sem_perm)) {
 		rcu_read_unlock();
 		un = ERR_PTR(-EIDRM);
 		goto out;
@@ -1627,19 +1938,19 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	rcu_read_unlock();
 
 	/* step 2: allocate new undo structure */
-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
 	if (!new) {
-		sem_putref(sma);
+		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	/* step 3: Acquire the lock on semaphore array */
 	rcu_read_lock();
 	sem_lock_and_putref(sma);
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
-		kfree(new);
+		kvfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
 	}
@@ -1650,11 +1961,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	 */
 	un = lookup_undo(ulp, semid);
 	if (un) {
-		kfree(new);
+		spin_unlock(&ulp->lock);
+		kvfree(new);
 		goto success;
 	}
 	/* step 5: initialize & link new undo structure */
-	new->semadj = (short *) &new[1];
 	new->ulp = ulp;
 	new->semid = semid;
 	assert_spin_locked(&ulp->lock);
@@ -1662,100 +1973,71 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	ipc_assert_locked_object(&sma->sem_perm);
 	list_add(&new->list_id, &sma->list_id);
 	un = new;
-
-success:
 	spin_unlock(&ulp->lock);
+success:
 	sem_unlock(sma, -1);
 out:
 	return un;
 }
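
Worth pausing on the pattern the find_alloc_undo() hunks above implement: look up under the lock, and if allocation is needed, pin the array with ipc_rcu_getref(), drop every lock before the sleeping allocation, then relock, drop the reference, and revalidate both the array and the lookup before linking. A minimal sketch of the same discipline in plain C with pthreads; every name below is hypothetical, and the kernel's RCU/refcount details are deliberately elided:

    #include <pthread.h>
    #include <stdlib.h>

    struct table {
    	pthread_mutex_t lock;
    	int deleted;	/* set by a concurrent remove, like IPC_RMID */
    	void *cached;	/* the lazily allocated per-table object */
    };

    void *get_or_alloc(struct table *t)
    {
    	void *obj;

    	pthread_mutex_lock(&t->lock);
    	obj = t->cached;
    	pthread_mutex_unlock(&t->lock);
    	if (obj)
    		return obj;		/* fast path: already present */

    	/* slow path: allocate with no locks held, as it may block */
    	obj = calloc(1, 64);
    	if (!obj)
    		return NULL;

    	pthread_mutex_lock(&t->lock);
    	if (t->deleted) {		/* vanished meanwhile: the -EIDRM case */
    		pthread_mutex_unlock(&t->lock);
    		free(obj);
    		return NULL;
    	}
    	if (t->cached) {		/* lost the race: discard our copy */
    		free(obj);
    		obj = t->cached;
    	} else {
    		t->cached = obj;
    	}
    	pthread_mutex_unlock(&t->lock);
    	return obj;
    }

The re-lookup after relocking is the essential step: another thread may have allocated and linked its own copy while no locks were held, exactly the race step 4 in find_alloc_undo() guards against.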
-
-/**
- * get_queue_result - Retrieve the result code from sem_queue
- * @q: Pointer to queue structure
- *
- * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
- * q->status, then we must loop until the value is replaced with the final
- * value: This may happen if a task is woken up by an unrelated event (e.g.
- * signal) and in parallel the task is woken up by another task because it got
- * the requested semaphores.
- *
- * The function can be called with or without holding the semaphore spinlock.
- */
-static int get_queue_result(struct sem_queue *q)
-{
-	int error;
-
-	error = q->status;
-	while (unlikely(error == IN_WAKEUP)) {
-		cpu_relax();
-		error = q->status;
-	}
-
-	return error;
-}
-
-SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
-		unsigned, nsops, const struct timespec __user *, timeout)
+long __do_semtimedop(int semid, struct sembuf *sops,
+		unsigned nsops, const struct timespec64 *timeout,
+		struct ipc_namespace *ns)
 {
 	int error = -EINVAL;
 	struct sem_array *sma;
-	struct sembuf fast_sops[SEMOPM_FAST];
-	struct sembuf* sops = fast_sops, *sop;
+	struct sembuf *sop;
 	struct sem_undo *un;
-	int undos = 0, alter = 0, max, locknum;
+	int max, locknum;
+	bool undos = false, alter = false, dupsop = false;
 	struct sem_queue queue;
-	unsigned long jiffies_left = 0;
-	struct ipc_namespace *ns;
-	struct list_head tasks;
-
-	ns = current->nsproxy->ipc_ns;
+	unsigned long dup = 0;
+	ktime_t expires, *exp = NULL;
+	bool timed_out = false;
 
 	if (nsops < 1 || semid < 0)
 		return -EINVAL;
 	if (nsops > ns->sc_semopm)
 		return -E2BIG;
-	if(nsops > SEMOPM_FAST) {
-		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
-		if(sops==NULL)
-			return -ENOMEM;
-	}
-	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
-		error=-EFAULT;
-		goto out_free;
-	}
+
 	if (timeout) {
-		struct timespec _timeout;
-		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
-			error = -EFAULT;
-			goto out_free;
-		}
-		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
-			_timeout.tv_nsec >= 1000000000L) {
-			error = -EINVAL;
-			goto out_free;
-		}
-		jiffies_left = timespec_to_jiffies(&_timeout);
+		if (!timespec64_valid(timeout))
+			return -EINVAL;
+		expires = ktime_add_safe(ktime_get(),
+				timespec64_to_ktime(*timeout));
+		exp = &expires;
 	}
+
+	max = 0;
 	for (sop = sops; sop < sops + nsops; sop++) {
+		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
+
 		if (sop->sem_num >= max)
 			max = sop->sem_num;
 		if (sop->sem_flg & SEM_UNDO)
-			undos = 1;
-		if (sop->sem_op != 0)
-			alter = 1;
+			undos = true;
+		if (dup & mask) {
+			/*
+			 * There was a previous alter access that appears
+			 * to have accessed the same semaphore, thus use
+			 * the dupsop logic. "appears", because the detection
+			 * can only check % BITS_PER_LONG.
+			 */
+			dupsop = true;
+		}
+		if (sop->sem_op != 0) {
+			alter = true;
+			dup |= mask;
+		}
 	}
 
-	INIT_LIST_HEAD(&tasks);
-
 	if (undos) {
 		/* On success, find_alloc_undo takes the rcu_read_lock */
 		un = find_alloc_undo(ns, semid);
 		if (IS_ERR(un)) {
 			error = PTR_ERR(un);
-			goto out_free;
+			goto out;
 		}
 	} else {
 		un = NULL;
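
The sops scan above folds each altering sem_num into one unsigned long, so duplicates are detected only modulo BITS_PER_LONG; a collision (say, sem_num 1 and 65 on a 64-bit build) is a false positive that merely forces the slower dupsop update path, never a false negative. A standalone replica of that check, assuming nothing beyond libc (the kernel spells the constant 1ULL; 1UL is equivalent where long is the native word):

    #include <stdbool.h>
    #include <limits.h>
    #include <sys/sem.h>	/* struct sembuf */

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static bool has_dup_alter(const struct sembuf *sops, unsigned nsops)
    {
    	unsigned long dup = 0;
    	unsigned i;

    	for (i = 0; i < nsops; i++) {
    		unsigned long mask = 1UL << (sops[i].sem_num % BITS_PER_LONG);

    		if (dup & mask)
    			return true;	/* possible duplicate: take slow path */
    		if (sops[i].sem_op != 0)
    			dup |= mask;	/* only altering ops are recorded */
    	}
    	return false;
    }

Being conservative is the right trade-off here: one word of state suffices, and the only cost of a collision is that the update after the operation walks the pending list more carefully.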
@@ -1766,21 +2048,39 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	if (IS_ERR(sma)) {
 		rcu_read_unlock();
 		error = PTR_ERR(sma);
-		goto out_free;
+		goto out;
 	}
 
 	error = -EFBIG;
-	if (max >= sma->sem_nsems)
-		goto out_rcu_wakeup;
+	if (max >= sma->sem_nsems) {
+		rcu_read_unlock();
+		goto out;
+	}
 
 	error = -EACCES;
-	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
-		goto out_rcu_wakeup;
+	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
+		rcu_read_unlock();
+		goto out;
+	}
 
-	error = security_sem_semop(sma, sops, nsops, alter);
-	if (error)
-		goto out_rcu_wakeup;
+	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
+	if (error) {
+		rcu_read_unlock();
+		goto out;
+	}
 
+	error = -EIDRM;
+	locknum = sem_lock(sma, sops, nsops);
+	/*
+	 * We eventually might perform the following check in a lockless
+	 * fashion, considering ipc_valid_object() locking constraints.
+	 * If nsops == 1 and there is no contention for sem_perm.lock, then
+	 * only a per-semaphore lock is held and it's OK to proceed with the
+	 * check below. More details on the fine grained locking scheme
+	 * entangled here, and why it's RMID race safe, are in the comments
+	 * at sem_lock().
+	 */
+	if (!ipc_valid_object(&sma->sem_perm))
+		goto out_unlock;
 	/*
 	 * semid identifiers are not unique - find_alloc_undo may have
 	 * allocated an undo structure, it was invalidated by an RMID
@@ -1788,33 +2088,46 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	 * This case can be detected checking un->semid. The existence of
 	 * "un" itself is guaranteed by rcu.
 	 */
-	error = -EIDRM;
-	locknum = sem_lock(sma, sops, nsops);
 	if (un && un->semid == -1)
-		goto out_unlock_free;
-
-	error = perform_atomic_semop(sma, sops, nsops, un,
-					task_tgid_vnr(current));
-	if (error <= 0) {
-		if (alter && error == 0)
-			do_smart_update(sma, sops, nsops, 1, &tasks);
-
-		goto out_unlock_free;
-	}
+		goto out_unlock;
 
-	/* We need to sleep on this operation, so we put the current
-	 * task into the pending queue and go to sleep.
-	 */
-
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
-	queue.pid = task_tgid_vnr(current);
+	queue.pid = task_tgid(current);
 	queue.alter = alter;
+	queue.dupsop = dupsop;
+
+	error = perform_atomic_semop(sma, &queue);
+	if (error == 0) { /* non-blocking successful path */
+		DEFINE_WAKE_Q(wake_q);
+
+		/*
+		 * If the operation was successful, then do
+		 * the required updates.
+		 */
+		if (alter)
+			do_smart_update(sma, sops, nsops, 1, &wake_q);
+		else
+			set_semotime(sma, sops);
+
+		sem_unlock(sma, locknum);
+		rcu_read_unlock();
+		wake_up_q(&wake_q);
+
+		goto out;
+	}
+	if (error < 0) /* non-blocking error path */
+		goto out_unlock;
 
+	/*
+	 * We need to sleep on this operation, so we put the current
+	 * task into the pending queue and go to sleep.
+	 */
 	if (nsops == 1) {
 		struct sem *curr;
+		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
 
-		curr = &sma->sem_base[sops->sem_num];
+		curr = &sma->sems[idx];
 
 		if (alter) {
 			if (sma->complex_count) {
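
Both semctl_main() earlier and the single-sop path here clamp the user-supplied semaphore index with array_index_nospec() before dereferencing sems[], so a mispredicted bounds check cannot speculatively read beyond the array. A rough portable-C illustration of the helper's contract; the in-kernel helper relies on arch-specific branch-free sequences, so this sketch shows only the semantics, not a hardened implementation:

    #include <stddef.h>

    /* Returns idx when idx < size, and 0 otherwise - without letting
     * the out-of-range value through even transiently. The mask is
     * all-ones for in-range indices and zero for out-of-range ones. */
    static inline size_t index_nospec(size_t idx, size_t size)
    {
    	size_t mask = (size_t)0 - (idx < size);
    	return idx & mask;
    }

Note that a compiler is free to turn the comparison above into a branch, which would defeat the purpose; the real array_index_nospec() exists precisely to prevent that.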
@@ -1840,96 +2153,157 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		sma->complex_count++;
 	}
 
-	queue.status = -EINTR;
-	queue.sleeper = current;
+	do {
+		/* memory ordering ensured by the lock in sem_lock() */
+		WRITE_ONCE(queue.status, -EINTR);
+		queue.sleeper = current;
 
-sleep_again:
-	current->state = TASK_INTERRUPTIBLE;
-	sem_unlock(sma, locknum);
-	rcu_read_unlock();
+		/* memory ordering is ensured by the lock in sem_lock() */
+		__set_current_state(TASK_INTERRUPTIBLE);
+		sem_unlock(sma, locknum);
+		rcu_read_unlock();
 
-	if (timeout)
-		jiffies_left = schedule_timeout(jiffies_left);
-	else
-		schedule();
+		timed_out = !schedule_hrtimeout_range(exp,
+				current->timer_slack_ns, HRTIMER_MODE_ABS);
 
-	error = get_queue_result(&queue);
+		/*
+		 * fastpath: the semop has completed, either successfully or
+		 * not, from the syscall pov, is quite irrelevant to us at this
+		 * point; we're done.
+		 *
+		 * We _do_ care, nonetheless, about being awoken by a signal or
+		 * spuriously.  The queue.status is checked again in the
+		 * slowpath (aka after taking sem_lock), such that we can detect
+		 * scenarios where we were awakened externally, during the
+		 * window between wake_q_add() and wake_up_q().
+		 */
+		rcu_read_lock();
+		error = READ_ONCE(queue.status);
+		if (error != -EINTR) {
+			/* see SEM_BARRIER_2 for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
+			rcu_read_unlock();
+			goto out;
+		}
+
+		locknum = sem_lock(sma, sops, nsops);
 
-	if (error != -EINTR) {
-		/* fast path: update_queue already obtained all requested
-		 * resources.
-		 * Perform a smp_mb(): User space could assume that semop()
-		 * is a memory barrier: Without the mb(), the cpu could
-		 * speculatively read in user space stale data that was
-		 * overwritten by the previous owner of the semaphore.
+		if (!ipc_valid_object(&sma->sem_perm))
+			goto out_unlock;
+
+		/*
+		 * No necessity for any barrier: we are protected by sem_lock()
 		 */
-		smp_mb();
+		error = READ_ONCE(queue.status);
-
-		goto out_free;
-	}
+		/*
+		 * If queue.status != -EINTR we are woken up by another process.
+		 * Leave without unlink_queue(), but with sem_unlock().
+		 */
+		if (error != -EINTR)
+			goto out_unlock;
 
-	rcu_read_lock();
-	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
+		/*
+		 * If an interrupt occurred we have to clean up the queue.
+		 */
+		if (timed_out)
+			error = -EAGAIN;
+	} while (error == -EINTR && !signal_pending(current)); /* spurious */
 
-	/*
-	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
-	 */
-	error = get_queue_result(&queue);
+	unlink_queue(sma, &queue);
 
-	/*
-	 * Array removed? If yes, leave without sem_unlock().
-	 */
-	if (IS_ERR(sma)) {
-		rcu_read_unlock();
+out_unlock:
+	sem_unlock(sma, locknum);
+	rcu_read_unlock();
+out:
+	return error;
+}
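
Seen from userspace, the hrtimer-based wait that replaces the old jiffies loop keeps the familiar semtimedop() contract: a wait that expires fails with EAGAIN, which is exactly what the timed_out path above sets. A small demonstration program, error handling trimmed:

    #define _GNU_SOURCE	/* glibc exposes semtimedop() with this */
    #include <stdio.h>
    #include <errno.h>
    #include <time.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
    	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
    	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

    	/* semval is 0, so this blocks; give up after roughly 1s */
    	if (semtimedop(id, &op, 1, &ts) == -1 && errno == EAGAIN)
    		puts("timed out: EAGAIN, as set by the timed_out path");

    	semctl(id, 0, IPC_RMID);
    	return 0;
    }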
+
+static long do_semtimedop(int semid, struct sembuf __user *tsops,
+		unsigned nsops, const struct timespec64 *timeout)
+{
+	struct sembuf fast_sops[SEMOPM_FAST];
+	struct sembuf *sops = fast_sops;
+	struct ipc_namespace *ns;
+	int ret;
+
+	ns = current->nsproxy->ipc_ns;
+	if (nsops > ns->sc_semopm)
+		return -E2BIG;
+	if (nsops < 1)
+		return -EINVAL;
+
+	if (nsops > SEMOPM_FAST) {
+		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+		if (sops == NULL)
+			return -ENOMEM;
+	}
+
+	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
+		ret = -EFAULT;
 		goto out_free;
 	}
+	ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
 
-	/*
-	 * If queue.status != -EINTR we are woken up by another process.
-	 * Leave without unlink_queue(), but with sem_unlock().
-	 */
+out_free:
+	if (sops != fast_sops)
+		kvfree(sops);
 
-	if (error != -EINTR) {
-		goto out_unlock_free;
-	}
+	return ret;
+}
 
-	/*
-	 * If an interrupt occurred we have to clean up the queue
-	 */
-	if (timeout && jiffies_left == 0)
-		error = -EAGAIN;
+long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+		     unsigned int nsops, const struct __kernel_timespec __user *timeout)
+{
+	if (timeout) {
+		struct timespec64 ts;
+		if (get_timespec64(&ts, timeout))
+			return -EFAULT;
+		return do_semtimedop(semid, tsops, nsops, &ts);
+	}
+	return do_semtimedop(semid, tsops, nsops, NULL);
+}
 
-	/*
-	 * If the wakeup was spurious, just retry
-	 */
-	if (error == -EINTR && !signal_pending(current))
-		goto sleep_again;
+SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
+{
+	return ksys_semtimedop(semid, tsops, nsops, timeout);
+}
 
-	unlink_queue(sma, &queue);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+			    unsigned int nsops,
+			    const struct old_timespec32 __user *timeout)
+{
+	if (timeout) {
+		struct timespec64 ts;
+		if (get_old_timespec32(&ts, timeout))
+			return -EFAULT;
+		return do_semtimedop(semid, tsems, nsops, &ts);
+	}
+	return do_semtimedop(semid, tsems, nsops, NULL);
+}
 
-out_unlock_free:
-	sem_unlock(sma, locknum);
-out_rcu_wakeup:
-	rcu_read_unlock();
-	wake_up_sem_queue_do(&tasks);
-out_free:
-	if(sops != fast_sops)
-		kfree(sops);
-	return error;
+SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
+		unsigned int, nsops,
+		const struct old_timespec32 __user *, timeout)
+{
+	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
 }
+#endif
 
 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
 		unsigned, nsops)
 {
-	return sys_semtimedop(semid, tsops, nsops, NULL);
+	return do_semtimedop(semid, tsops, nsops, NULL);
 }
 
 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  * parent and child tasks.
  */
 
-int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+int copy_semundo(u64 clone_flags, struct task_struct *tsk)
 {
 	struct sem_undo_list *undo_list;
 	int error;
 
@@ -1938,9 +2312,9 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		atomic_inc(&undo_list->refcnt);
+		refcount_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
-	} else
+	} else
 		tsk->sysvsem.undo_list = NULL;
 
 	return 0;
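
copy_semundo() above wires a CLONE_SYSVSEM child to its parent's undo list; exit_sem(), in the next hunk, is what later applies the recorded semadj values when a task dies. The user-visible effect of SEM_UNDO, in a short program:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
    	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

    	if (fork() == 0) {
    		semop(id, &up, 1);	/* semval: 0 -> 1, semadj records -1 */
    		_exit(0);		/* exit_sem() applies semadj: 1 -> 0 */
    	}
    	wait(NULL);
    	/* prints 0: the child's increment was undone at exit */
    	printf("semval after child exit: %d\n", semctl(id, 0, GETVAL));
    	semctl(id, 0, IPC_RMID);
    	return 0;
    }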
@@ -1967,29 +2341,43 @@ void exit_sem(struct task_struct *tsk)
 		return;
 	tsk->sysvsem.undo_list = NULL;
 
-	if (!atomic_dec_and_test(&ulp->refcnt))
+	if (!refcount_dec_and_test(&ulp->refcnt))
 		return;
 
 	for (;;) {
 		struct sem_array *sma;
 		struct sem_undo *un;
-		struct list_head tasks;
 		int semid, i;
+		DEFINE_WAKE_Q(wake_q);
+
+		cond_resched();
 
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
-		if (&un->list_proc == &ulp->list_proc)
-			semid = -1;
-		else
-			semid = un->semid;
+		if (&un->list_proc == &ulp->list_proc) {
+			/*
+			 * We must wait for freeary() before freeing this ulp,
+			 * in case we raced with last sem_undo. There is a small
+			 * possibility where we exit while freeary() didn't
+			 * finish unlocking sem_undo_list.
+			 */
+			spin_lock(&ulp->lock);
+			spin_unlock(&ulp->lock);
+			rcu_read_unlock();
+			break;
+		}
+		spin_lock(&ulp->lock);
+		semid = un->semid;
+		spin_unlock(&ulp->lock);
 
+		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (semid == -1) {
 			rcu_read_unlock();
-			break;
+			continue;
 		}
 
-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
 		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma)) {
 			rcu_read_unlock();
@@ -1997,6 +2385,12 @@ void exit_sem(struct task_struct *tsk)
 		}
 
 		sem_lock(sma, NULL, -1);
+		/* exit_sem raced with IPC_RMID, nothing to do */
+		if (!ipc_valid_object(&sma->sem_perm)) {
+			sem_unlock(sma, -1);
+			rcu_read_unlock();
+			continue;
+		}
 		un = __lookup_undo(ulp, semid);
 		if (un == NULL) {
 			/* exit_sem raced with IPC_RMID+semget() that created
@@ -2017,7 +2411,7 @@ void exit_sem(struct task_struct *tsk)
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
-			struct sem * semaphore = &sma->sem_base[i];
+			struct sem *semaphore = &sma->sems[i];
 			if (un->semadj[i]) {
 				semaphore->semval += un->semadj[i];
 				/*
@@ -2031,23 +2425,22 @@ void exit_sem(struct task_struct *tsk)
 				 * Linux caps the semaphore value, both at 0
 				 * and at SEMVMX.
 				 *
-				 * 	Manfred <manfred@colorfullife.com>
+				 *	Manfred <manfred@colorfullife.com>
 				 */
 				if (semaphore->semval < 0)
 					semaphore->semval = 0;
 				if (semaphore->semval > SEMVMX)
 					semaphore->semval = SEMVMX;
-				semaphore->sempid = task_tgid_vnr(current);
+				ipc_update_pid(&semaphore->sempid, task_tgid(current));
 			}
 		}
 		/* maybe some queued-up processes were waiting for this */
-		INIT_LIST_HEAD(&tasks);
-		do_smart_update(sma, NULL, 0, 1, &tasks);
+		do_smart_update(sma, NULL, 0, 1, &wake_q);
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
-		wake_up_sem_queue_do(&tasks);
+		wake_up_q(&wake_q);
 
-		kfree_rcu(un, rcu);
+		kvfree_rcu(un, rcu);
 	}
 	kfree(ulp);
 }
@@ -2056,22 +2449,36 @@ void exit_sem(struct task_struct *tsk)
 static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 {
 	struct user_namespace *user_ns = seq_user_ns(s);
-	struct sem_array *sma = it;
-	time_t sem_otime;
+	struct kern_ipc_perm *ipcp = it;
+	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
+	time64_t sem_otime;
+
+	/*
+	 * The proc interface isn't aware of sem_lock(), it calls
+	 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
+	 * (in sysvipc_find_ipc)
+	 * In order to stay compatible with sem_lock(), we must
+	 * enter / leave complex_mode.
+	 */
+	complexmode_enter(sma);
 
 	sem_otime = get_semotime(sma);
 
-	return seq_printf(s,
-			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
-			  sma->sem_perm.key,
-			  sma->sem_perm.id,
-			  sma->sem_perm.mode,
-			  sma->sem_nsems,
-			  from_kuid_munged(user_ns, sma->sem_perm.uid),
-			  from_kgid_munged(user_ns, sma->sem_perm.gid),
-			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
-			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
-			  sem_otime,
-			  sma->sem_ctime);
+	seq_printf(s,
+		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
+		   sma->sem_perm.key,
+		   sma->sem_perm.id,
+		   sma->sem_perm.mode,
+		   sma->sem_nsems,
+		   from_kuid_munged(user_ns, sma->sem_perm.uid),
+		   from_kgid_munged(user_ns, sma->sem_perm.gid),
+		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
+		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
+		   sem_otime,
+		   sma->sem_ctime);
+
+	complexmode_tryleave(sma);
+
+	return 0;
 }
 #endif
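
The seq_printf() above fixes the ten-column layout of /proc/sysvipc/sem (key, semid, perms, nsems, uid, gid, cuid, cgid, otime, ctime), with both timestamps now printed as 64-bit values. A sketch of a reader that parses that table:

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/proc/sysvipc/sem", "r");
    	char header[256];
    	int key, id;
    	unsigned mode, nsems, uid, gid, cuid, cgid;
    	long long ot, ct;

    	if (!f)
    		return 1;
    	fgets(header, sizeof(header), f);	/* skip the column header */
    	while (fscanf(f, "%d %d %o %u %u %u %u %u %lld %lld",
    		      &key, &id, &mode, &nsems, &uid, &gid,
    		      &cuid, &cgid, &ot, &ct) == 10)
    		printf("semid %d: %u sems, otime=%lld\n", id, nsems, ot);
    	fclose(f);
    	return 0;
    }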
