Diffstat (limited to 'ipc/sem.c')
| -rw-r--r-- | ipc/sem.c | 283 |
1 file changed, 171 insertions, 112 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index 745dc6187e84..0f06e4bd4673 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -36,7 +36,7 @@
  * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
  * - undo adjustments at process exit are limited to 0..SEMVMX.
  * - namespace are supported.
- * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
  *   to /proc/sys/kernel/sem.
  * - statistics about the usage are reported in /proc/sysvipc/sem.
  *
@@ -152,7 +152,7 @@ struct sem_undo {
 	struct list_head	list_id;	/* per semaphore array list:
 						 * all undos for one array */
 	int			semid;		/* semaphore set identifier */
-	short			*semadj;	/* array of adjustments */
+	short			semadj[];	/* array of adjustments */
 						/* one per semaphore */
 };
@@ -205,15 +205,40 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
  *
  * Memory ordering:
  * Most ordering is enforced by using spin_lock() and spin_unlock().
- * The special case is use_global_lock:
+ *
+ * Exceptions:
+ * 1) use_global_lock: (SEM_BARRIER_1)
  * Setting it from non-zero to 0 is a RELEASE, this is ensured by
- * using smp_store_release().
+ * using smp_store_release(): Immediately after setting it to 0,
+ * a simple op can start.
  * Testing if it is non-zero is an ACQUIRE, this is ensured by using
  * smp_load_acquire().
  * Setting it from 0 to non-zero must be ordered with regards to
  * this smp_load_acquire(), this is guaranteed because the smp_load_acquire()
  * is inside a spin_lock() and after a write from 0 to non-zero a
  * spin_lock()+spin_unlock() is done.
+ * To prevent the compiler/cpu temporarily writing 0 to use_global_lock,
+ * READ_ONCE()/WRITE_ONCE() is used.
+ *
+ * 2) queue.status: (SEM_BARRIER_2)
+ * Initialization is done while holding sem_lock(), so no further barrier is
+ * required.
+ * Setting it to a result code is a RELEASE, this is ensured by both a
+ * smp_store_release() (for case a) and while holding sem_lock()
+ * (for case b).
+ * The ACQUIRE when reading the result code without holding sem_lock() is
+ * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep().
+ * (case a above).
+ * Reading the result code while holding sem_lock() needs no further barriers,
+ * the locks inside sem_lock() enforce ordering (case b above)
+ *
+ * 3) current->state:
+ * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
+ * The wakeup is handled using the wake_q infrastructure. wake_q wakeups may
+ * happen immediately after calling wake_q_add. As wake_q_add_safe() is called
+ * when holding sem_lock(), no further barriers are required.
+ *
+ * See also ipc/mqueue.c for more details on the covered races.
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -319,10 +344,10 @@ static void complexmode_enter(struct sem_array *sma)
 		 * Nothing to do, just reset the
 		 * counter until we return to simple mode.
 		 */
-		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 		return;
 	}
-	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
+	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
 
 	for (i = 0; i < sma->sem_nsems; i++) {
 		sem = &sma->sems[i];
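The SEM_BARRIER_1 rules documented above are a conventional release/acquire handoff. As a rough, self-contained illustration of the pattern (a userspace sketch using C11 atomics rather than the kernel's smp_* primitives; all names are invented for the example):

	#include <stdatomic.h>

	/* Toy model of the SEM_BARRIER_1 handoff: the complex-op side
	 * publishes its writes with a release store of 0; a simple op may
	 * rely on those writes only after an acquire load observes the 0. */
	static _Atomic int use_global_lock_model = 1;
	static int protected_data;

	void complex_side_leave(void)
	{
		protected_data = 42;	/* writes done while "global lock" held */
		/* release: all writes above become visible before the 0 does */
		atomic_store_explicit(&use_global_lock_model, 0,
				      memory_order_release);
	}

	int simple_side_fastpath(void)
	{
		/* acquire: pairs with the release store above */
		if (!atomic_load_explicit(&use_global_lock_model,
					  memory_order_acquire))
			return protected_data;	/* guaranteed to read 42 */
		return -1;			/* complex op in flight: slow path */
	}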
@@ -344,15 +369,12 @@ static void complexmode_tryleave(struct sem_array *sma)
 		return;
 	}
 	if (sma->use_global_lock == 1) {
-		/*
-		 * Immediately after setting use_global_lock to 0,
-		 * a simple op can start. Thus: all memory writes
-		 * performed by the current operation must be visible
-		 * before we set use_global_lock to 0.
-		 */
+
+		/* See SEM_BARRIER_1 for purpose/pairing */
 		smp_store_release(&sma->use_global_lock, 0);
 	} else {
-		sma->use_global_lock--;
+		WRITE_ONCE(sma->use_global_lock,
+				sma->use_global_lock-1);
 	}
 }
 
@@ -393,14 +415,14 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 	 * Initial check for use_global_lock. Just an optimization,
 	 * no locking, no memory barrier.
 	 */
-	if (!sma->use_global_lock) {
+	if (!READ_ONCE(sma->use_global_lock)) {
 		/*
 		 * It appears that no complex operation is around.
 		 * Acquire the per-semaphore lock.
 		 */
 		spin_lock(&sem->lock);
 
-		/* pairs with smp_store_release() */
+		/* see SEM_BARRIER_1 for purpose/pairing */
 		if (!smp_load_acquire(&sma->use_global_lock)) {
 			/* fast path successful! */
 			return sops->sem_num;
@@ -488,18 +510,14 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 static struct sem_array *sem_alloc(size_t nsems)
 {
 	struct sem_array *sma;
-	size_t size;
 
 	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
 		return NULL;
 
-	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
-	sma = kvmalloc(size, GFP_KERNEL);
+	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
 	if (unlikely(!sma))
 		return NULL;
 
-	memset(sma, 0, size);
-
 	return sma;
 }
@@ -570,8 +588,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 /*
  * Called with sem_ids.rwsem and ipcp locked.
  */
-static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
-				struct ipc_params *params)
+static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 {
 	struct sem_array *sma;
@@ -749,7 +766,6 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 	for (sop = sops; sop < sops + nsops; sop++) {
 		curr = &sma->sems[sop->sem_num];
 		sem_op = sop->sem_op;
-		result = curr->semval;
 
 		if (sop->sem_flg & SEM_UNDO) {
 			int undo = un->semadj[sop->sem_num] - sem_op;
@@ -770,15 +786,14 @@ would_block:
 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
 					     struct wake_q_head *wake_q)
 {
-	wake_q_add(wake_q, q->sleeper);
-	/*
-	 * Rely on the above implicit barrier, such that we can
-	 * ensure that we hold reference to the task before setting
-	 * q->status. Otherwise we could race with do_exit if the
-	 * task is awoken by an external event before calling
-	 * wake_up_process().
-	 */
-	WRITE_ONCE(q->status, error);
+	struct task_struct *sleeper;
+
+	sleeper = get_task_struct(q->sleeper);
+
+	/* see SEM_BARRIER_2 for purpose/pairing */
+	smp_store_release(&q->status, error);
+
+	wake_q_add_safe(wake_q, sleeper);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -810,7 +825,7 @@ static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 
 	/* It is impossible that someone waits for the new value:
 	 * - complex operations always restart.
-	 * - wait-for-zero are handled seperately.
+	 * - wait-for-zero are handled separately.
 	 * - q is a previously sleeping simple operation that
 	 *   altered the array. It must be a decrement, because
 	 *   simple increments never sleep.
@@ -1035,7 +1050,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
 			 * - No complex ops, thus all sleeping ops are
 			 *   decrease.
 			 * - if we decreased the value, then any sleeping
-			 *   semaphore ops wont be able to run: If the
+			 *   semaphore ops won't be able to run: If the
 			 *   previous value was too small, then the new
 			 *   value will be too small, too.
 			 */
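The sem_alloc() hunk above replaces the open-coded size computation (sizeof(*sma) + nsems * sizeof(sma->sems[0]) followed by memset()) with kvzalloc(struct_size(...)), matching the earlier conversion of semadj to a flexible array member. struct_size(), from <linux/overflow.h>, computes the size of a structure with a trailing flexible array and saturates on overflow, so the allocation fails cleanly instead of returning a too-small buffer. A minimal sketch of the same pattern, using a hypothetical struct invented for the example:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* hypothetical example struct, analogous to sem_array/sem_undo */
	struct example {
		size_t	n;
		short	adj[];	/* flexible array, allocated in one block */
	};

	static struct example *example_alloc(size_t n)
	{
		/* sizeof(struct example) + n * sizeof(short), saturating
		 * on overflow so that kvzalloc() reliably fails */
		struct example *e = kvzalloc(struct_size(e, adj, n),
					     GFP_KERNEL);

		if (!e)
			return NULL;
		e->n = n;
		return e;	/* release with kvfree() */
	}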
@@ -1141,7 +1156,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 		un->semid = -1;
 		list_del_rcu(&un->list_proc);
 		spin_unlock(&un->ulp->lock);
-		kfree_rcu(un, rcu);
+		kvfree_rcu(un, rcu);
 	}
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
@@ -1414,7 +1429,6 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	if (err)
 		goto out_rcu_wakeup;
 
-	err = -EACCES;
 	switch (cmd) {
 	case GETALL:
 	{
@@ -1634,9 +1648,8 @@ out_up:
 	return err;
 }
 
-long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
+static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
 {
-	int version;
 	struct ipc_namespace *ns;
 	void __user *p = (void __user *)arg;
 	struct semid64_ds semid64;
@@ -1645,7 +1658,6 @@ long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
 	if (semid < 0)
 		return -EINVAL;
 
-	version = ipc_parse_version(&cmd);
 	ns = current->nsproxy->ipc_ns;
 
 	switch (cmd) {
@@ -1682,6 +1694,7 @@ long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
 	case IPC_SET:
 		if (copy_semid_from_user(&semid64, p, version))
 			return -EFAULT;
+		fallthrough;
 	case IPC_RMID:
 		return semctl_down(ns, semid, cmd, &semid64);
 	default:
@@ -1691,8 +1704,22 @@ long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
 
 SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
 {
-	return ksys_semctl(semid, semnum, cmd, arg);
+	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
+{
+	int version = ipc_parse_version(&cmd);
+
+	return ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+	return ksys_old_semctl(semid, semnum, cmd, arg);
 }
+#endif
 
 #ifdef CONFIG_COMPAT
@@ -1744,12 +1771,11 @@ static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
 	}
 }
 
-long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
+static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
 {
 	void __user *p = compat_ptr(arg);
 	struct ipc_namespace *ns;
 	struct semid64_ds semid64;
-	int version = compat_ipc_parse_version(&cmd);
 	int err;
 
 	ns = current->nsproxy->ipc_ns;
@@ -1782,7 +1808,7 @@ long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
 	case IPC_SET:
 		if (copy_compat_semid_from_user(&semid64, p, version))
 			return -EFAULT;
-		/* fallthru */
+		fallthrough;
 	case IPC_RMID:
 		return semctl_down(ns, semid, cmd, &semid64);
 	default:
@@ -1792,9 +1818,23 @@ long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
 
 COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
 {
-	return compat_ksys_semctl(semid, semnum, cmd, arg);
+	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
+{
+	int version = compat_ipc_parse_version(&cmd);
+
+	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
 }
 #endif
+#endif
 
 /* If the task doesn't already have a undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
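The semctl() rework above moves IPC version parsing out of the common path: the modern semctl and compat semctl entry points now pass IPC_64 unconditionally, and only the new old_semctl entry points (built when the architecture selects CONFIG_ARCH_WANT_IPC_PARSE_VERSION) still decode the old-style flag from cmd. For context, ipc_parse_version() in ipc/util.c does roughly the following (a sketch, not a verbatim copy):

	/* Sketch of ipc_parse_version(): for the old-style syscalls, cmd
	 * both selects the operation and carries the IPC_64 flag, which
	 * must be stripped before the command value is interpreted. */
	int parse_version(int *cmd)
	{
		if (*cmd & IPC_64) {	/* new 64-bit structure layout */
			*cmd ^= IPC_64;
			return IPC_64;
		}
		return IPC_OLD;		/* legacy structure layout */
	}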
@@ -1813,7 +1853,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 	undo_list = current->sysvsem.undo_list;
 	if (!undo_list) {
-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
 		if (undo_list == NULL)
 			return -ENOMEM;
 		spin_lock_init(&undo_list->lock);
@@ -1830,7 +1870,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
 	struct sem_undo *un;
 
-	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
+				spin_is_locked(&ulp->lock)) {
 		if (un->semid == semid)
 			return un;
 	}
@@ -1897,7 +1938,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	rcu_read_unlock();
 
 	/* step 2: allocate new undo structure */
-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT);
 	if (!new) {
 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
@@ -1909,7 +1950,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
-		kfree(new);
+		kvfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
 	}
@@ -1920,11 +1961,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	 */
 	un = lookup_undo(ulp, semid);
 	if (un) {
-		kfree(new);
+		spin_unlock(&ulp->lock);
+		kvfree(new);
 		goto success;
 	}
 	/* step 5: initialize & link new undo structure */
-	new->semadj = (short *) &new[1];
 	new->ulp = ulp;
 	new->semid = semid;
 	assert_spin_locked(&ulp->lock);
@@ -1932,54 +1973,42 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	ipc_assert_locked_object(&sma->sem_perm);
 	list_add(&new->list_id, &sma->list_id);
 	un = new;
-
-success:
 	spin_unlock(&ulp->lock);
+success:
 	sem_unlock(sma, -1);
 out:
 	return un;
 }
 
-static long do_semtimedop(int semid, struct sembuf __user *tsops,
-		unsigned nsops, const struct timespec64 *timeout)
+long __do_semtimedop(int semid, struct sembuf *sops,
+		unsigned nsops, const struct timespec64 *timeout,
+		struct ipc_namespace *ns)
 {
 	int error = -EINVAL;
 	struct sem_array *sma;
-	struct sembuf fast_sops[SEMOPM_FAST];
-	struct sembuf *sops = fast_sops, *sop;
+	struct sembuf *sop;
 	struct sem_undo *un;
 	int max, locknum;
 	bool undos = false, alter = false, dupsop = false;
 	struct sem_queue queue;
-	unsigned long dup = 0, jiffies_left = 0;
-	struct ipc_namespace *ns;
-
-	ns = current->nsproxy->ipc_ns;
+	unsigned long dup = 0;
+	ktime_t expires, *exp = NULL;
+	bool timed_out = false;
 
 	if (nsops < 1 || semid < 0)
 		return -EINVAL;
 	if (nsops > ns->sc_semopm)
 		return -E2BIG;
 
-	if (nsops > SEMOPM_FAST) {
-		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
-		if (sops == NULL)
-			return -ENOMEM;
-	}
-
-	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
-		error = -EFAULT;
-		goto out_free;
-	}
-
 	if (timeout) {
-		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
-			timeout->tv_nsec >= 1000000000L) {
-			error = -EINVAL;
-			goto out_free;
-		}
-		jiffies_left = timespec64_to_jiffies(timeout);
+		if (!timespec64_valid(timeout))
+			return -EINVAL;
+		expires = ktime_add_safe(ktime_get(),
+				timespec64_to_ktime(*timeout));
+		exp = &expires;
 	}
+
 	max = 0;
 	for (sop = sops; sop < sops + nsops; sop++) {
 		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
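The timeout handling in the new __do_semtimedop() deserves a note: instead of carrying a relative jiffies count across the sleep loop (and re-arming schedule_timeout() with whatever remained after each spurious wakeup), the function computes one absolute expiry up front. ktime_add_safe() saturates rather than wrapping for huge timeouts, and schedule_hrtimeout_range() returns 0 only when the timer actually fired, so every iteration of the retry loop sleeps toward the same deadline. Condensed from the hunk above:

	ktime_t expires, *exp = NULL;
	bool timed_out = false;

	if (timeout) {
		if (!timespec64_valid(timeout))
			return -EINVAL;
		/* one absolute deadline for all iterations of the loop */
		expires = ktime_add_safe(ktime_get(),
				timespec64_to_ktime(*timeout));
		exp = &expires;
	}
	/* ... later, in the sleep loop; a NULL expiry pointer means
	 * "sleep until explicitly woken" */
	timed_out = !schedule_hrtimeout_range(exp,
			current->timer_slack_ns, HRTIMER_MODE_ABS);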
@@ -2008,7 +2037,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		un = find_alloc_undo(ns, semid);
 		if (IS_ERR(un)) {
 			error = PTR_ERR(un);
-			goto out_free;
+			goto out;
 		}
 	} else {
 		un = NULL;
@@ -2019,25 +2048,25 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	if (IS_ERR(sma)) {
 		rcu_read_unlock();
 		error = PTR_ERR(sma);
-		goto out_free;
+		goto out;
 	}
 
 	error = -EFBIG;
 	if (max >= sma->sem_nsems) {
 		rcu_read_unlock();
-		goto out_free;
+		goto out;
 	}
 
 	error = -EACCES;
 	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
 		rcu_read_unlock();
-		goto out_free;
+		goto out;
 	}
 
 	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
 	if (error) {
 		rcu_read_unlock();
-		goto out_free;
+		goto out;
 	}
 
 	error = -EIDRM;
@@ -2051,7 +2080,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	 * entangled here and why it's RMID race safe on comments at sem_lock()
 	 */
 	if (!ipc_valid_object(&sma->sem_perm))
-		goto out_unlock_free;
+		goto out_unlock;
 	/*
 	 * semid identifiers are not unique - find_alloc_undo may have
 	 * allocated an undo structure, it was invalidated by an RMID
@@ -2060,7 +2089,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	 * "un" itself is guaranteed by rcu.
 	 */
 	if (un && un->semid == -1)
-		goto out_unlock_free;
+		goto out_unlock;
 
 	queue.sops = sops;
 	queue.nsops = nsops;
@@ -2070,7 +2099,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	queue.undo = un;
 	queue.pid = task_tgid(current);
 	queue.alter = alter;
 	queue.dupsop = dupsop;
 
 	error = perform_atomic_semop(sma, &queue);
-	if (error == 0) { /* non-blocking succesfull path */
+	if (error == 0) { /* non-blocking successful path */
 		DEFINE_WAKE_Q(wake_q);
 
 		/*
@@ -2086,10 +2115,10 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		rcu_read_unlock();
 		wake_up_q(&wake_q);
 
-		goto out_free;
+		goto out;
 	}
 	if (error < 0) /* non-blocking error path */
-		goto out_unlock_free;
+		goto out_unlock;
 
 	/*
 	 * We need to sleep on this operation, so we put the current
@@ -2125,17 +2154,17 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	}
 
 	do {
+		/* memory ordering ensured by the lock in sem_lock() */
 		WRITE_ONCE(queue.status, -EINTR);
 		queue.sleeper = current;
 
+		/* memory ordering is ensured by the lock in sem_lock() */
 		__set_current_state(TASK_INTERRUPTIBLE);
 		sem_unlock(sma, locknum);
 		rcu_read_unlock();
 
-		if (timeout)
-			jiffies_left = schedule_timeout(jiffies_left);
-		else
-			schedule();
+		timed_out = !schedule_hrtimeout_range(exp,
+				current->timer_slack_ns, HRTIMER_MODE_ABS);
 
 		/*
 		 * fastpath: the semop has completed, either successfully or
@@ -2148,24 +2177,23 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		 * scenarios where we were awakened externally, during the
 		 * window between wake_q_add() and wake_up_q().
 		 */
+		rcu_read_lock();
 		error = READ_ONCE(queue.status);
 		if (error != -EINTR) {
-			/*
-			 * User space could assume that semop() is a memory
-			 * barrier: Without the mb(), the cpu could
-			 * speculatively read in userspace stale data that was
-			 * overwritten by the previous owner of the semaphore.
-			 */
-			smp_mb();
-			goto out_free;
+			/* see SEM_BARRIER_2 for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
+			rcu_read_unlock();
+			goto out;
		}
 
-		rcu_read_lock();
 		locknum = sem_lock(sma, sops, nsops);
 
 		if (!ipc_valid_object(&sma->sem_perm))
-			goto out_unlock_free;
+			goto out_unlock;
 
+		/*
+		 * No necessity for any barrier: We are protected by sem_lock()
+		 */
 		error = READ_ONCE(queue.status);
 
 		/*
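Taken together, the wake_up_sem_queue_prepare() hunk earlier and the sleep loop above form the SEM_BARRIER_2 pairing: the waker pins the task with get_task_struct() before publishing the result with a release store, and the sleeper's lock-free exit upgrades its plain READ_ONCE() to an acquire through the control dependency plus smp_acquire__after_ctrl_dep(), preserving the historical guarantee that a completed semop() acts as a memory barrier for userspace. Reduced to its skeleton (simplified from the diff, not compilable on its own):

	/* waker side, wake_up_sem_queue_prepare(), simplified: */
	sleeper = get_task_struct(q->sleeper);	/* pin before publishing */
	smp_store_release(&q->status, error);	/* RELEASE the result code */
	wake_q_add_safe(wake_q, sleeper);	/* consumes the reference */

	/* sleeper side, lock-free exit path, simplified: */
	error = READ_ONCE(queue.status);
	if (error != -EINTR) {
		/* ACQUIRE: pairs with the release store above */
		smp_acquire__after_ctrl_dep();
		rcu_read_unlock();
		return error;
	}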
@@ -2173,24 +2201,56 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 		 * Leave without unlink_queue(), but with sem_unlock().
 		 */
 		if (error != -EINTR)
-			goto out_unlock_free;
+			goto out_unlock;
 
 		/*
 		 * If an interrupt occurred we have to clean up the queue.
 		 */
-		if (timeout && jiffies_left == 0)
+		if (timed_out)
 			error = -EAGAIN;
 	} while (error == -EINTR && !signal_pending(current)); /* spurious */
 
 	unlink_queue(sma, &queue);
 
-out_unlock_free:
+out_unlock:
 	sem_unlock(sma, locknum);
 	rcu_read_unlock();
+out:
+	return error;
+}
+
+static long do_semtimedop(int semid, struct sembuf __user *tsops,
+		unsigned nsops, const struct timespec64 *timeout)
+{
+	struct sembuf fast_sops[SEMOPM_FAST];
+	struct sembuf *sops = fast_sops;
+	struct ipc_namespace *ns;
+	int ret;
+
+	ns = current->nsproxy->ipc_ns;
+	if (nsops > ns->sc_semopm)
+		return -E2BIG;
+	if (nsops < 1)
+		return -EINVAL;
+
+	if (nsops > SEMOPM_FAST) {
+		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+		if (sops == NULL)
+			return -ENOMEM;
+	}
+
+	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+
+	ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
+
 out_free:
 	if (sops != fast_sops)
 		kvfree(sops);
-	return error;
+
+	return ret;
 }
 
 long ksys_semtimedop(int semid, struct sembuf __user *tsops,
@@ -2225,7 +2285,7 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
 	return do_semtimedop(semid, tsems, nsops, NULL);
 }
 
-COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
 		       unsigned int, nsops,
 		       const struct old_timespec32 __user *, timeout)
 {
@@ -2243,7 +2303,7 @@ SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
  *	parent and child tasks.
  */
 
-int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+int copy_semundo(u64 clone_flags, struct task_struct *tsk)
 {
 	struct sem_undo_list *undo_list;
 	int error;
@@ -2345,11 +2405,9 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
-		/* we are the last process using this ulp, acquiring ulp->lock
-		 * isn't required. Besides that, we are also protected against
-		 * IPC_RMID as we hold sma->sem_perm lock now
-		 */
+		spin_lock(&ulp->lock);
 		list_del_rcu(&un->list_proc);
+		spin_unlock(&ulp->lock);
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
@@ -2382,7 +2440,7 @@ void exit_sem(struct task_struct *tsk)
 		rcu_read_unlock();
 		wake_up_q(&wake_q);
 
-		kfree_rcu(un, rcu);
+		kvfree_rcu(un, rcu);
 	}
 	kfree(ulp);
 }
@@ -2397,7 +2455,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 
 	/*
	 * The proc interface isn't aware of sem_lock(), it calls
-	 * ipc_lock_object() directly (in sysvipc_find_ipc).
+	 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
+	 * (in sysvipc_find_ipc)
 	 * In order to stay compatible with sem_lock(), we must
 	 * enter / leave complex_mode.
 	 */
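The __do_semtimedop()/do_semtimedop() split above also deserves a note: all user-pointer handling (copy_from_user() plus the SEMOPM_FAST small-buffer optimization) stays in the thin do_semtimedop() wrapper, while __do_semtimedop() now operates on a plain in-kernel sembuf array and an explicit namespace argument, making it usable by in-kernel callers as well. The stack-or-heap pattern in the wrapper, shown in isolation:

	struct sembuf fast_sops[SEMOPM_FAST];	/* common case: no allocation */
	struct sembuf *sops = fast_sops;

	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	/* ... fill and use sops[0..nsops-1] ... */
	if (sops != fast_sops)
		kvfree(sops);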
