Diffstat (limited to 'ipc')
-rw-r--r--  ipc/Makefile            6
-rw-r--r--  ipc/compat.c          751
-rw-r--r--  ipc/compat_mq.c       151
-rw-r--r--  ipc/ipc_sysctl.c      396
-rw-r--r--  ipc/ipcns_notifier.c   92
-rw-r--r--  ipc/mq_sysctl.c       161
-rw-r--r--  ipc/mqueue.c         1234
-rw-r--r--  ipc/msg.c            1044
-rw-r--r--  ipc/msgutil.c          54
-rw-r--r--  ipc/namespace.c       187
-rw-r--r--  ipc/sem.c            1869
-rw-r--r--  ipc/shm.c            1464
-rw-r--r--  ipc/syscall.c         148
-rw-r--r--  ipc/util.c            862
-rw-r--r--  ipc/util.h            207
15 files changed, 4794 insertions, 3832 deletions
diff --git a/ipc/Makefile b/ipc/Makefile index 9075e172e52c..c2558c430f51 100644 --- a/ipc/Makefile +++ b/ipc/Makefile @@ -1,12 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for the linux ipc. # obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o -obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o syscall.o +obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o syscall.o obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o -obj_mq-$(CONFIG_COMPAT) += compat_mq.o -obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y) +obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o obj-$(CONFIG_IPC_NS) += namespace.o obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o diff --git a/ipc/compat.c b/ipc/compat.c index 892f6585dd60..5ab8225923af 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * 32 bit compatibility code for System V IPC * @@ -30,736 +31,52 @@ #include <linux/ptrace.h> #include <linux/mutex.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" -struct compat_msgbuf { - compat_long_t mtype; - char mtext[1]; -}; - -struct compat_ipc_perm { - key_t key; - __compat_uid_t uid; - __compat_gid_t gid; - __compat_uid_t cuid; - __compat_gid_t cgid; - compat_mode_t mode; - unsigned short seq; -}; - -struct compat_semid_ds { - struct compat_ipc_perm sem_perm; - compat_time_t sem_otime; - compat_time_t sem_ctime; - compat_uptr_t sem_base; - compat_uptr_t sem_pending; - compat_uptr_t sem_pending_last; - compat_uptr_t undo; - unsigned short sem_nsems; -}; - -struct compat_msqid_ds { - struct compat_ipc_perm msg_perm; - compat_uptr_t msg_first; - compat_uptr_t msg_last; - compat_time_t msg_stime; - compat_time_t msg_rtime; - compat_time_t msg_ctime; - compat_ulong_t msg_lcbytes; - compat_ulong_t msg_lqbytes; - unsigned short msg_cbytes; - unsigned short msg_qnum; - unsigned short msg_qbytes; - compat_ipc_pid_t msg_lspid; - compat_ipc_pid_t msg_lrpid; -}; - -struct compat_shmid_ds { - struct compat_ipc_perm shm_perm; - int shm_segsz; - compat_time_t shm_atime; - compat_time_t shm_dtime; - compat_time_t shm_ctime; - compat_ipc_pid_t shm_cpid; - compat_ipc_pid_t shm_lpid; - unsigned short shm_nattch; - unsigned short shm_unused; - compat_uptr_t shm_unused2; - compat_uptr_t shm_unused3; -}; - -struct compat_ipc_kludge { - compat_uptr_t msgp; - compat_long_t msgtyp; -}; - -struct compat_shminfo64 { - compat_ulong_t shmmax; - compat_ulong_t shmmin; - compat_ulong_t shmmni; - compat_ulong_t shmseg; - compat_ulong_t shmall; - compat_ulong_t __unused1; - compat_ulong_t __unused2; - compat_ulong_t __unused3; - compat_ulong_t __unused4; -}; - -struct compat_shm_info { - compat_int_t used_ids; - compat_ulong_t shm_tot, shm_rss, shm_swp; - compat_ulong_t swap_attempts, swap_successes; -}; - -extern int sem_ctls[]; -#define sc_semopm (sem_ctls[2]) - -static inline int compat_ipc_parse_version(int *cmd) -{ -#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION - int version = *cmd & IPC_64; - - /* this is tricky: architectures that have support for the old - * ipc structures in 64 bit binaries need to have IPC_64 set - * in cmd, the others need to have it cleared */ -#ifndef ipc_parse_version - *cmd |= IPC_64; -#else - *cmd &= ~IPC_64; -#endif - return version; -#else - /* With the asm-generic APIs, we always use the 64-bit versions. 
*/ - return IPC_64; -#endif -} - -static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64, - struct compat_ipc64_perm __user *up64) -{ - int err; - - err = __get_user(p64->uid, &up64->uid); - err |= __get_user(p64->gid, &up64->gid); - err |= __get_user(p64->mode, &up64->mode); - return err; -} - -static inline int __get_compat_ipc_perm(struct ipc64_perm *p, - struct compat_ipc_perm __user *up) -{ - int err; - - err = __get_user(p->uid, &up->uid); - err |= __get_user(p->gid, &up->gid); - err |= __get_user(p->mode, &up->mode); - return err; -} - -static inline int __put_compat_ipc64_perm(struct ipc64_perm *p64, - struct compat_ipc64_perm __user *up64) -{ - int err; - - err = __put_user(p64->key, &up64->key); - err |= __put_user(p64->uid, &up64->uid); - err |= __put_user(p64->gid, &up64->gid); - err |= __put_user(p64->cuid, &up64->cuid); - err |= __put_user(p64->cgid, &up64->cgid); - err |= __put_user(p64->mode, &up64->mode); - err |= __put_user(p64->seq, &up64->seq); - return err; -} - -static inline int __put_compat_ipc_perm(struct ipc64_perm *p, - struct compat_ipc_perm __user *up) -{ - int err; - __compat_uid_t u; - __compat_gid_t g; - - err = __put_user(p->key, &up->key); - SET_UID(u, p->uid); - err |= __put_user(u, &up->uid); - SET_GID(g, p->gid); - err |= __put_user(g, &up->gid); - SET_UID(u, p->cuid); - err |= __put_user(u, &up->cuid); - SET_GID(g, p->cgid); - err |= __put_user(g, &up->cgid); - err |= __put_user(p->mode, &up->mode); - err |= __put_user(p->seq, &up->seq); - return err; -} - -static inline int get_compat_semid64_ds(struct semid64_ds *s64, - struct compat_semid64_ds __user *up64) -{ - if (!access_ok (VERIFY_READ, up64, sizeof(*up64))) - return -EFAULT; - return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); -} - -static inline int get_compat_semid_ds(struct semid64_ds *s, - struct compat_semid_ds __user *up) -{ - if (!access_ok (VERIFY_READ, up, sizeof(*up))) - return -EFAULT; - return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm); -} - -static inline int put_compat_semid64_ds(struct semid64_ds *s64, - struct compat_semid64_ds __user *up64) -{ - int err; - - if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64))) - return -EFAULT; - err = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); - err |= __put_user(s64->sem_otime, &up64->sem_otime); - err |= __put_user(s64->sem_ctime, &up64->sem_ctime); - err |= __put_user(s64->sem_nsems, &up64->sem_nsems); - return err; -} - -static inline int put_compat_semid_ds(struct semid64_ds *s, - struct compat_semid_ds __user *up) -{ - int err; - - if (!access_ok (VERIFY_WRITE, up, sizeof(*up))) - return -EFAULT; - err = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm); - err |= __put_user(s->sem_otime, &up->sem_otime); - err |= __put_user(s->sem_ctime, &up->sem_ctime); - err |= __put_user(s->sem_nsems, &up->sem_nsems); - return err; -} - -static long do_compat_semctl(int first, int second, int third, u32 pad) -{ - unsigned long fourth; - int err, err2; - struct semid64_ds s64; - struct semid64_ds __user *up64; - int version = compat_ipc_parse_version(&third); - - memset(&s64, 0, sizeof(s64)); - - if ((third & (~IPC_64)) == SETVAL) -#ifdef __BIG_ENDIAN - fourth = (unsigned long)pad << 32; -#else - fourth = pad; -#endif - else - fourth = (unsigned long)compat_ptr(pad); - switch (third & (~IPC_64)) { - case IPC_INFO: - case IPC_RMID: - case SEM_INFO: - case GETVAL: - case GETPID: - case GETNCNT: - case GETZCNT: - case GETALL: - case SETVAL: - case SETALL: - err = sys_semctl(first, second, third, fourth); - 
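The removed compat_ipc_parse_version() helper only mattered inside the kernel: it decided whether a 32-bit caller passed the old or the IPC_64 structure layout. From userspace the distinction is invisible, since libc always uses the 64-bit layout on modern systems. A minimal, hypothetical demo of the semctl() path the helper used to route (the union semun definition is the caller's responsibility on Linux):

```c
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* glibc requires the caller to define union semun themselves */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) {
		perror("semget");
		return 1;
	}
	if (semctl(id, 0, IPC_STAT, arg) < 0)	/* kernel fills the 64-bit layout */
		perror("semctl(IPC_STAT)");
	else
		printf("nsems=%lu\n", (unsigned long)ds.sem_nsems);

	semctl(id, 0, IPC_RMID);		/* clean up the set */
	return 0;
}
```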
break; - - case IPC_STAT: - case SEM_STAT: - up64 = compat_alloc_user_space(sizeof(s64)); - fourth = (unsigned long)up64; - err = sys_semctl(first, second, third, fourth); - if (err < 0) - break; - if (copy_from_user(&s64, up64, sizeof(s64))) - err2 = -EFAULT; - else if (version == IPC_64) - err2 = put_compat_semid64_ds(&s64, compat_ptr(pad)); - else - err2 = put_compat_semid_ds(&s64, compat_ptr(pad)); - if (err2) - err = -EFAULT; - break; - - case IPC_SET: - if (version == IPC_64) { - err = get_compat_semid64_ds(&s64, compat_ptr(pad)); - } else { - err = get_compat_semid_ds(&s64, compat_ptr(pad)); - } - up64 = compat_alloc_user_space(sizeof(s64)); - if (copy_to_user(up64, &s64, sizeof(s64))) - err = -EFAULT; - if (err) - break; - - fourth = (unsigned long)up64; - err = sys_semctl(first, second, third, fourth); - break; - - default: - err = -EINVAL; - break; - } - return err; -} - -static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) -{ - struct compat_msgbuf __user *msgp = dest; - size_t msgsz; - - if (put_user(msg->m_type, &msgp->mtype)) - return -EFAULT; - - msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; - if (store_msg(msgp->mtext, msg, msgsz)) - return -EFAULT; - return msgsz; -} - -#ifndef COMPAT_SHMLBA -#define COMPAT_SHMLBA SHMLBA -#endif - -#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC -COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second, - u32, third, compat_uptr_t, ptr, u32, fifth) -{ - int version; - u32 pad; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - case SEMOP: - /* struct sembuf is the same on 32 and 64bit :)) */ - return sys_semtimedop(first, compat_ptr(ptr), second, NULL); - case SEMTIMEDOP: - return compat_sys_semtimedop(first, compat_ptr(ptr), second, - compat_ptr(fifth)); - case SEMGET: - return sys_semget(first, second, third); - case SEMCTL: - if (!ptr) - return -EINVAL; - if (get_user(pad, (u32 __user *) compat_ptr(ptr))) - return -EFAULT; - return do_compat_semctl(first, second, third, pad); - - case MSGSND: { - struct compat_msgbuf __user *up = compat_ptr(ptr); - compat_long_t type; - - if (first < 0 || second < 0) - return -EINVAL; - - if (get_user(type, &up->mtype)) - return -EFAULT; - - return do_msgsnd(first, type, up->mtext, second, third); - } - case MSGRCV: { - void __user *uptr = compat_ptr(ptr); - - if (first < 0 || second < 0) - return -EINVAL; - - if (!version) { - struct compat_ipc_kludge ipck; - if (!uptr) - return -EINVAL; - if (copy_from_user (&ipck, uptr, sizeof(ipck))) - return -EFAULT; - uptr = compat_ptr(ipck.msgp); - fifth = ipck.msgtyp; - } - return do_msgrcv(first, uptr, second, fifth, third, - compat_do_msg_fill); - } - case MSGGET: - return sys_msgget(first, second); - case MSGCTL: - return compat_sys_msgctl(first, second, compat_ptr(ptr)); - - case SHMAT: { - int err; - unsigned long raddr; - - if (version == 1) - return -EINVAL; - err = do_shmat(first, compat_ptr(ptr), second, &raddr, - COMPAT_SHMLBA); - if (err < 0) - return err; - return put_user(raddr, (compat_ulong_t *)compat_ptr(third)); - } - case SHMDT: - return sys_shmdt(compat_ptr(ptr)); - case SHMGET: - return sys_shmget(first, (unsigned)second, third); - case SHMCTL: - return compat_sys_shmctl(first, second, compat_ptr(ptr)); - } - - return -ENOSYS; -} -#endif - -COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg) -{ - return do_compat_semctl(semid, semnum, cmd, arg); -} - -COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp, - compat_ssize_t, 
msgsz, int, msgflg) -{ - struct compat_msgbuf __user *up = compat_ptr(msgp); - compat_long_t mtype; - - if (get_user(mtype, &up->mtype)) - return -EFAULT; - return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg); -} - -COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, - compat_ssize_t, msgsz, long, msgtyp, int, msgflg) -{ - return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, msgtyp, - msgflg, compat_do_msg_fill); -} - -static inline int get_compat_msqid64(struct msqid64_ds *m64, - struct compat_msqid64_ds __user *up64) -{ - int err; - - if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) - return -EFAULT; - err = __get_compat_ipc64_perm(&m64->msg_perm, &up64->msg_perm); - err |= __get_user(m64->msg_qbytes, &up64->msg_qbytes); - return err; -} - -static inline int get_compat_msqid(struct msqid64_ds *m, - struct compat_msqid_ds __user *up) +int get_compat_ipc64_perm(struct ipc64_perm *to, + struct compat_ipc64_perm __user *from) { - int err; - - if (!access_ok(VERIFY_READ, up, sizeof(*up))) + struct compat_ipc64_perm v; + if (copy_from_user(&v, from, sizeof(v))) return -EFAULT; - err = __get_compat_ipc_perm(&m->msg_perm, &up->msg_perm); - err |= __get_user(m->msg_qbytes, &up->msg_qbytes); - return err; + to->uid = v.uid; + to->gid = v.gid; + to->mode = v.mode; + return 0; } -static inline int put_compat_msqid64_ds(struct msqid64_ds *m64, - struct compat_msqid64_ds __user *up64) +int get_compat_ipc_perm(struct ipc64_perm *to, + struct compat_ipc_perm __user *from) { - int err; - - if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) + struct compat_ipc_perm v; + if (copy_from_user(&v, from, sizeof(v))) return -EFAULT; - err = __put_compat_ipc64_perm(&m64->msg_perm, &up64->msg_perm); - err |= __put_user(m64->msg_stime, &up64->msg_stime); - err |= __put_user(m64->msg_rtime, &up64->msg_rtime); - err |= __put_user(m64->msg_ctime, &up64->msg_ctime); - err |= __put_user(m64->msg_cbytes, &up64->msg_cbytes); - err |= __put_user(m64->msg_qnum, &up64->msg_qnum); - err |= __put_user(m64->msg_qbytes, &up64->msg_qbytes); - err |= __put_user(m64->msg_lspid, &up64->msg_lspid); - err |= __put_user(m64->msg_lrpid, &up64->msg_lrpid); - return err; + to->uid = v.uid; + to->gid = v.gid; + to->mode = v.mode; + return 0; } -static inline int put_compat_msqid_ds(struct msqid64_ds *m, - struct compat_msqid_ds __user *up) +void to_compat_ipc64_perm(struct compat_ipc64_perm *to, struct ipc64_perm *from) { - int err; - - if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) - return -EFAULT; - err = __put_compat_ipc_perm(&m->msg_perm, &up->msg_perm); - err |= __put_user(m->msg_stime, &up->msg_stime); - err |= __put_user(m->msg_rtime, &up->msg_rtime); - err |= __put_user(m->msg_ctime, &up->msg_ctime); - err |= __put_user(m->msg_cbytes, &up->msg_cbytes); - err |= __put_user(m->msg_qnum, &up->msg_qnum); - err |= __put_user(m->msg_qbytes, &up->msg_qbytes); - err |= __put_user(m->msg_lspid, &up->msg_lspid); - err |= __put_user(m->msg_lrpid, &up->msg_lrpid); - return err; -} - -long compat_sys_msgctl(int first, int second, void __user *uptr) -{ - int err, err2; - struct msqid64_ds m64; - int version = compat_ipc_parse_version(&second); - void __user *p; - - memset(&m64, 0, sizeof(m64)); - - switch (second & (~IPC_64)) { - case IPC_INFO: - case IPC_RMID: - case MSG_INFO: - err = sys_msgctl(first, second, uptr); - break; - - case IPC_SET: - if (version == IPC_64) { - err = get_compat_msqid64(&m64, uptr); - } else { - err = get_compat_msqid(&m64, uptr); - } - if (err) - break; - p = 
compat_alloc_user_space(sizeof(m64)); - if (copy_to_user(p, &m64, sizeof(m64))) - err = -EFAULT; - else - err = sys_msgctl(first, second, p); - break; - - case IPC_STAT: - case MSG_STAT: - p = compat_alloc_user_space(sizeof(m64)); - err = sys_msgctl(first, second, p); - if (err < 0) - break; - if (copy_from_user(&m64, p, sizeof(m64))) - err2 = -EFAULT; - else if (version == IPC_64) - err2 = put_compat_msqid64_ds(&m64, uptr); - else - err2 = put_compat_msqid_ds(&m64, uptr); - if (err2) - err = -EFAULT; - break; - - default: - err = -EINVAL; - break; - } - return err; -} - -COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg) -{ - unsigned long ret; - long err; - - err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA); - if (err) - return err; - force_successful_syscall_return(); - return (long)ret; -} - -static inline int get_compat_shmid64_ds(struct shmid64_ds *s64, - struct compat_shmid64_ds __user *up64) -{ - if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) - return -EFAULT; - return __get_compat_ipc64_perm(&s64->shm_perm, &up64->shm_perm); -} - -static inline int get_compat_shmid_ds(struct shmid64_ds *s, - struct compat_shmid_ds __user *up) -{ - if (!access_ok(VERIFY_READ, up, sizeof(*up))) - return -EFAULT; - return __get_compat_ipc_perm(&s->shm_perm, &up->shm_perm); -} - -static inline int put_compat_shmid64_ds(struct shmid64_ds *s64, - struct compat_shmid64_ds __user *up64) -{ - int err; - - if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) - return -EFAULT; - err = __put_compat_ipc64_perm(&s64->shm_perm, &up64->shm_perm); - err |= __put_user(s64->shm_atime, &up64->shm_atime); - err |= __put_user(s64->shm_dtime, &up64->shm_dtime); - err |= __put_user(s64->shm_ctime, &up64->shm_ctime); - err |= __put_user(s64->shm_segsz, &up64->shm_segsz); - err |= __put_user(s64->shm_nattch, &up64->shm_nattch); - err |= __put_user(s64->shm_cpid, &up64->shm_cpid); - err |= __put_user(s64->shm_lpid, &up64->shm_lpid); - return err; -} - -static inline int put_compat_shmid_ds(struct shmid64_ds *s, - struct compat_shmid_ds __user *up) -{ - int err; - - if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) - return -EFAULT; - err = __put_compat_ipc_perm(&s->shm_perm, &up->shm_perm); - err |= __put_user(s->shm_atime, &up->shm_atime); - err |= __put_user(s->shm_dtime, &up->shm_dtime); - err |= __put_user(s->shm_ctime, &up->shm_ctime); - err |= __put_user(s->shm_segsz, &up->shm_segsz); - err |= __put_user(s->shm_nattch, &up->shm_nattch); - err |= __put_user(s->shm_cpid, &up->shm_cpid); - err |= __put_user(s->shm_lpid, &up->shm_lpid); - return err; -} - -static inline int put_compat_shminfo64(struct shminfo64 *smi, - struct compat_shminfo64 __user *up64) -{ - int err; - - if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) - return -EFAULT; - if (smi->shmmax > INT_MAX) - smi->shmmax = INT_MAX; - err = __put_user(smi->shmmax, &up64->shmmax); - err |= __put_user(smi->shmmin, &up64->shmmin); - err |= __put_user(smi->shmmni, &up64->shmmni); - err |= __put_user(smi->shmseg, &up64->shmseg); - err |= __put_user(smi->shmall, &up64->shmall); - return err; -} - -static inline int put_compat_shminfo(struct shminfo64 *smi, - struct shminfo __user *up) -{ - int err; - - if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) - return -EFAULT; - if (smi->shmmax > INT_MAX) - smi->shmmax = INT_MAX; - err = __put_user(smi->shmmax, &up->shmmax); - err |= __put_user(smi->shmmin, &up->shmmin); - err |= __put_user(smi->shmmni, &up->shmmni); - err |= __put_user(smi->shmseg, &up->shmseg); - err |= 
__put_user(smi->shmall, &up->shmall); - return err; -} - -static inline int put_compat_shm_info(struct shm_info __user *ip, - struct compat_shm_info __user *uip) -{ - int err; - struct shm_info si; - - if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip)) || - copy_from_user(&si, ip, sizeof(si))) - return -EFAULT; - err = __put_user(si.used_ids, &uip->used_ids); - err |= __put_user(si.shm_tot, &uip->shm_tot); - err |= __put_user(si.shm_rss, &uip->shm_rss); - err |= __put_user(si.shm_swp, &uip->shm_swp); - err |= __put_user(si.swap_attempts, &uip->swap_attempts); - err |= __put_user(si.swap_successes, &uip->swap_successes); - return err; -} - -long compat_sys_shmctl(int first, int second, void __user *uptr) -{ - void __user *p; - struct shmid64_ds s64; - struct shminfo64 smi; - int err, err2; - int version = compat_ipc_parse_version(&second); - - memset(&s64, 0, sizeof(s64)); - - switch (second & (~IPC_64)) { - case IPC_RMID: - case SHM_LOCK: - case SHM_UNLOCK: - err = sys_shmctl(first, second, uptr); - break; - - case IPC_INFO: - p = compat_alloc_user_space(sizeof(smi)); - err = sys_shmctl(first, second, p); - if (err < 0) - break; - if (copy_from_user(&smi, p, sizeof(smi))) - err2 = -EFAULT; - else if (version == IPC_64) - err2 = put_compat_shminfo64(&smi, uptr); - else - err2 = put_compat_shminfo(&smi, uptr); - if (err2) - err = -EFAULT; - break; - - - case IPC_SET: - if (version == IPC_64) { - err = get_compat_shmid64_ds(&s64, uptr); - } else { - err = get_compat_shmid_ds(&s64, uptr); - } - if (err) - break; - p = compat_alloc_user_space(sizeof(s64)); - if (copy_to_user(p, &s64, sizeof(s64))) - err = -EFAULT; - else - err = sys_shmctl(first, second, p); - break; - - case IPC_STAT: - case SHM_STAT: - p = compat_alloc_user_space(sizeof(s64)); - err = sys_shmctl(first, second, p); - if (err < 0) - break; - if (copy_from_user(&s64, p, sizeof(s64))) - err2 = -EFAULT; - else if (version == IPC_64) - err2 = put_compat_shmid64_ds(&s64, uptr); - else - err2 = put_compat_shmid_ds(&s64, uptr); - if (err2) - err = -EFAULT; - break; - - case SHM_INFO: - p = compat_alloc_user_space(sizeof(struct shm_info)); - err = sys_shmctl(first, second, p); - if (err < 0) - break; - err2 = put_compat_shm_info(p, uptr); - if (err2) - err = -EFAULT; - break; - - default: - err = -EINVAL; - break; - } - return err; + to->key = from->key; + to->uid = from->uid; + to->gid = from->gid; + to->cuid = from->cuid; + to->cgid = from->cgid; + to->mode = from->mode; + to->seq = from->seq; } -long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsops, const struct compat_timespec __user *timeout) +void to_compat_ipc_perm(struct compat_ipc_perm *to, struct ipc64_perm *from) { - struct timespec __user *ts64 = NULL; - if (timeout) { - struct timespec ts; - ts64 = compat_alloc_user_space(sizeof(*ts64)); - if (get_compat_timespec(&ts, timeout)) - return -EFAULT; - if (copy_to_user(ts64, &ts, sizeof(ts))) - return -EFAULT; - } - return sys_semtimedop(semid, tsems, nsops, ts64); + to->key = from->key; + SET_UID(to->uid, from->uid); + SET_GID(to->gid, from->gid); + SET_UID(to->cuid, from->cuid); + SET_GID(to->cgid, from->cgid); + to->mode = from->mode; + to->seq = from->seq; } diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c deleted file mode 100644 index 380ea4fe08e7..000000000000 --- a/ipc/compat_mq.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * ipc/compat_mq.c - * 32 bit emulation for POSIX message queue system calls - * - * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author: Arnd Bergmann 
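The replacement helpers above (get_compat_ipc64_perm() and friends) drop the long chains of __get_user()/__put_user() in favor of one bulk copy of the whole compat struct followed by plain member assignments. A userspace analogue of that conversion pattern, with illustrative struct names that are not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct perm32 {			/* hypothetical 32-bit layout */
	uint16_t uid, gid;
	uint16_t mode;
};

struct perm64 {			/* hypothetical native layout */
	uint32_t uid, gid;
	uint32_t mode;
};

static void perm32_to_perm64(struct perm64 *to, const void *raw)
{
	struct perm32 v;

	memcpy(&v, raw, sizeof(v));	/* one bulk copy, like copy_from_user() */
	to->uid  = v.uid;		/* then widen field by field */
	to->gid  = v.gid;
	to->mode = v.mode;
}

int main(void)
{
	struct perm32 in = { .uid = 1000, .gid = 1000, .mode = 0600 };
	struct perm64 out;

	perm32_to_perm64(&out, &in);
	printf("uid=%u mode=%o\n", out.uid, out.mode);
	return 0;
}
```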
<arnd@arndb.de> - */ - -#include <linux/compat.h> -#include <linux/fs.h> -#include <linux/kernel.h> -#include <linux/mqueue.h> -#include <linux/syscalls.h> - -#include <asm/uaccess.h> - -struct compat_mq_attr { - compat_long_t mq_flags; /* message queue flags */ - compat_long_t mq_maxmsg; /* maximum number of messages */ - compat_long_t mq_msgsize; /* maximum message size */ - compat_long_t mq_curmsgs; /* number of messages currently queued */ - compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ -}; - -static inline int get_compat_mq_attr(struct mq_attr *attr, - const struct compat_mq_attr __user *uattr) -{ - if (!access_ok(VERIFY_READ, uattr, sizeof *uattr)) - return -EFAULT; - - return __get_user(attr->mq_flags, &uattr->mq_flags) - | __get_user(attr->mq_maxmsg, &uattr->mq_maxmsg) - | __get_user(attr->mq_msgsize, &uattr->mq_msgsize) - | __get_user(attr->mq_curmsgs, &uattr->mq_curmsgs); -} - -static inline int put_compat_mq_attr(const struct mq_attr *attr, - struct compat_mq_attr __user *uattr) -{ - if (clear_user(uattr, sizeof *uattr)) - return -EFAULT; - - return __put_user(attr->mq_flags, &uattr->mq_flags) - | __put_user(attr->mq_maxmsg, &uattr->mq_maxmsg) - | __put_user(attr->mq_msgsize, &uattr->mq_msgsize) - | __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs); -} - -asmlinkage long compat_sys_mq_open(const char __user *u_name, - int oflag, compat_mode_t mode, - struct compat_mq_attr __user *u_attr) -{ - void __user *p = NULL; - if (u_attr && oflag & O_CREAT) { - struct mq_attr attr; - - memset(&attr, 0, sizeof(attr)); - - p = compat_alloc_user_space(sizeof(attr)); - if (get_compat_mq_attr(&attr, u_attr) || - copy_to_user(p, &attr, sizeof(attr))) - return -EFAULT; - } - return sys_mq_open(u_name, oflag, mode, p); -} - -static int compat_prepare_timeout(struct timespec __user * *p, - const struct compat_timespec __user *u) -{ - struct timespec ts; - if (!u) { - *p = NULL; - return 0; - } - *p = compat_alloc_user_space(sizeof(ts)); - if (get_compat_timespec(&ts, u) || copy_to_user(*p, &ts, sizeof(ts))) - return -EFAULT; - return 0; -} - -asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, - const char __user *u_msg_ptr, - size_t msg_len, unsigned int msg_prio, - const struct compat_timespec __user *u_abs_timeout) -{ - struct timespec __user *u_ts; - - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) - return -EFAULT; - - return sys_mq_timedsend(mqdes, u_msg_ptr, msg_len, - msg_prio, u_ts); -} - -asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, - char __user *u_msg_ptr, - size_t msg_len, unsigned int __user *u_msg_prio, - const struct compat_timespec __user *u_abs_timeout) -{ - struct timespec __user *u_ts; - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) - return -EFAULT; - - return sys_mq_timedreceive(mqdes, u_msg_ptr, msg_len, - u_msg_prio, u_ts); -} - -asmlinkage long compat_sys_mq_notify(mqd_t mqdes, - const struct compat_sigevent __user *u_notification) -{ - struct sigevent __user *p = NULL; - if (u_notification) { - struct sigevent n; - p = compat_alloc_user_space(sizeof(*p)); - if (get_compat_sigevent(&n, u_notification)) - return -EFAULT; - if (n.sigev_notify == SIGEV_THREAD) - n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); - if (copy_to_user(p, &n, sizeof(*p))) - return -EFAULT; - } - return sys_mq_notify(mqdes, p); -} - -asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, - const struct compat_mq_attr __user *u_mqstat, - struct compat_mq_attr __user *u_omqstat) -{ - struct mq_attr mqstat; - struct mq_attr __user *p = 
compat_alloc_user_space(2 * sizeof(*p)); - long ret; - - memset(&mqstat, 0, sizeof(mqstat)); - - if (u_mqstat) { - if (get_compat_mq_attr(&mqstat, u_mqstat) || - copy_to_user(p, &mqstat, sizeof(mqstat))) - return -EFAULT; - } - ret = sys_mq_getsetattr(mqdes, - u_mqstat ? p : NULL, - u_omqstat ? p + 1 : NULL); - if (ret) - return ret; - if (u_omqstat) { - if (copy_from_user(&mqstat, p + 1, sizeof(mqstat)) || - put_compat_mq_attr(&mqstat, u_omqstat)) - return -EFAULT; - } - return 0; -} diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 130dfece27ac..15b17e86e198 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007 * * Author: Eric Biederman <ebiederm@xmision.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2 of the - * License. */ #include <linux/module.h> @@ -14,46 +10,21 @@ #include <linux/nsproxy.h> #include <linux/sysctl.h> #include <linux/uaccess.h> +#include <linux/capability.h> #include <linux/ipc_namespace.h> #include <linux/msg.h> +#include <linux/slab.h> +#include <linux/cred.h> #include "util.h" -static void *get_ipc(ctl_table *table) -{ - char *which = table->data; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; - return which; -} - -#ifdef CONFIG_PROC_SYSCTL -static int proc_ipc_dointvec(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - struct ctl_table ipc_table; - - memcpy(&ipc_table, table, sizeof(ipc_table)); - ipc_table.data = get_ipc(table); - - return proc_dointvec(&ipc_table, write, buffer, lenp, ppos); -} - -static int proc_ipc_dointvec_minmax(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int proc_ipc_dointvec_minmax_orphans(const struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) { - struct ctl_table ipc_table; + struct ipc_namespace *ns = + container_of(table->data, struct ipc_namespace, shm_rmid_forced); + int err; - memcpy(&ipc_table, table, sizeof(ipc_table)); - ipc_table.data = get_ipc(table); - - return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); -} - -static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - struct ipc_namespace *ns = current->nsproxy->ipc_ns; - int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos); + err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (err < 0) return err; @@ -62,127 +33,69 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write, return err; } -static int proc_ipc_callback_dointvec(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int proc_ipc_auto_msgmni(const struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; - size_t lenp_bef = *lenp; - int rc; + int dummy = 0; memcpy(&ipc_table, table, sizeof(ipc_table)); - ipc_table.data = get_ipc(table); + ipc_table.data = &dummy; - rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); + if (write) + pr_info_once("writing to auto_msgmni has no effect"); - if (write && !rc && lenp_bef == *lenp) - /* - * Tunable has successfully been changed by hand. Disable its - * automatic adjustment. 
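The deleted compat_mq.c wrappers existed only to round-trip mq_attr and timespec values through compat_alloc_user_space(); the POSIX interface itself is unchanged. A minimal sketch of the native API those wrappers emulated (link with -lrt on older glibc; the queue name is arbitrary):

```c
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr;
	mqd_t q = mq_open("/demo-q", O_CREAT | O_RDWR, 0600, NULL);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	if (mq_getattr(q, &attr) == 0)	/* the getsetattr path the wrapper drove */
		printf("maxmsg=%ld msgsize=%ld\n",
		       attr.mq_maxmsg, attr.mq_msgsize);

	mq_close(q);
	mq_unlink("/demo-q");
	return 0;
}
```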
This simply requires unregistering - * the notifiers that trigger recalculation. - */ - unregister_ipcns_notifier(current->nsproxy->ipc_ns); - - return rc; + return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); } -static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int proc_ipc_sem_dointvec(const struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) { - struct ctl_table ipc_table; - memcpy(&ipc_table, table, sizeof(ipc_table)); - ipc_table.data = get_ipc(table); + struct ipc_namespace *ns = + container_of(table->data, struct ipc_namespace, sem_ctls); + int ret, semmni; - return proc_doulongvec_minmax(&ipc_table, write, buffer, - lenp, ppos); -} + semmni = ns->sem_ctls[3]; + ret = proc_dointvec(table, write, buffer, lenp, ppos); -/* - * Routine that is called when the file "auto_msgmni" has successfully been - * written. - * Two values are allowed: - * 0: unregister msgmni's callback routine from the ipc namespace notifier - * chain. This means that msgmni won't be recomputed anymore upon memory - * add/remove or ipc namespace creation/removal. - * 1: register back the callback routine. - */ -static void ipc_auto_callback(int val) -{ - if (!val) - unregister_ipcns_notifier(current->nsproxy->ipc_ns); - else { - /* - * Re-enable automatic recomputing only if not already - * enabled. - */ - recompute_msgmni(current->nsproxy->ipc_ns); - cond_register_ipcns_notifier(current->nsproxy->ipc_ns); - } -} - -static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - struct ctl_table ipc_table; - size_t lenp_bef = *lenp; - int oldval; - int rc; - - memcpy(&ipc_table, table, sizeof(ipc_table)); - ipc_table.data = get_ipc(table); - oldval = *((int *)(ipc_table.data)); - - rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); - - if (write && !rc && lenp_bef == *lenp) { - int newval = *((int *)(ipc_table.data)); - /* - * The file "auto_msgmni" has correctly been set. - * React by (un)registering the corresponding tunable, if the - * value has changed. - */ - if (newval != oldval) - ipc_auto_callback(newval); - } + if (!ret) + ret = sem_check_semmni(ns); - return rc; + /* + * Reset the semmni value if an error happens. 
+ */ + if (ret) + ns->sem_ctls[3] = semmni; + return ret; } -#else -#define proc_ipc_doulongvec_minmax NULL -#define proc_ipc_dointvec NULL -#define proc_ipc_dointvec_minmax NULL -#define proc_ipc_dointvec_minmax_orphans NULL -#define proc_ipc_callback_dointvec NULL -#define proc_ipcauto_dointvec_minmax NULL -#endif - -static int zero; -static int one = 1; -#ifdef CONFIG_CHECKPOINT_RESTORE -static int int_max = INT_MAX; -#endif +int ipc_mni = IPCMNI; +int ipc_mni_shift = IPCMNI_SHIFT; +int ipc_min_cycle = RADIX_TREE_MAP_SIZE; -static struct ctl_table ipc_kern_table[] = { +static const struct ctl_table ipc_sysctls[] = { { .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, - .maxlen = sizeof (init_ipc_ns.shm_ctlmax), + .maxlen = sizeof(init_ipc_ns.shm_ctlmax), .mode = 0644, - .proc_handler = proc_ipc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "shmall", .data = &init_ipc_ns.shm_ctlall, - .maxlen = sizeof (init_ipc_ns.shm_ctlall), + .maxlen = sizeof(init_ipc_ns.shm_ctlall), .mode = 0644, - .proc_handler = proc_ipc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "shmmni", .data = &init_ipc_ns.shm_ctlmni, - .maxlen = sizeof (init_ipc_ns.shm_ctlmni), + .maxlen = sizeof(init_ipc_ns.shm_ctlmni), .mode = 0644, - .proc_handler = proc_ipc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &ipc_mni, }, { .procname = "shm_rmid_forced", @@ -190,91 +103,232 @@ static struct ctl_table ipc_kern_table[] = { .maxlen = sizeof(init_ipc_ns.shm_rmid_forced), .mode = 0644, .proc_handler = proc_ipc_dointvec_minmax_orphans, - .extra1 = &zero, - .extra2 = &one, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, { .procname = "msgmax", .data = &init_ipc_ns.msg_ctlmax, - .maxlen = sizeof (init_ipc_ns.msg_ctlmax), + .maxlen = sizeof(init_ipc_ns.msg_ctlmax), .mode = 0644, - .proc_handler = proc_ipc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, - .maxlen = sizeof (init_ipc_ns.msg_ctlmni), + .maxlen = sizeof(init_ipc_ns.msg_ctlmni), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &ipc_mni, + }, + { + .procname = "auto_msgmni", + .data = NULL, + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_ipc_callback_dointvec, + .proc_handler = proc_ipc_auto_msgmni, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, }, { .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, - .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), + .maxlen = sizeof(init_ipc_ns.msg_ctlmnb), .mode = 0644, - .proc_handler = proc_ipc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "sem", .data = &init_ipc_ns.sem_ctls, - .maxlen = 4*sizeof (int), + .maxlen = 4*sizeof(int), .mode = 0644, - .proc_handler = proc_ipc_dointvec, - }, - { - .procname = "auto_msgmni", - .data = &init_ipc_ns.auto_msgmni, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_ipcauto_dointvec_minmax, - .extra1 = &zero, - .extra2 = &one, + .proc_handler = proc_ipc_sem_dointvec, }, #ifdef CONFIG_CHECKPOINT_RESTORE { .procname = "sem_next_id", .data = &init_ipc_ns.ids[IPC_SEM_IDS].next_id, .maxlen = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id), - .mode = 0644, - .proc_handler = proc_ipc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &int_max, + .mode = 0444, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = 
SYSCTL_INT_MAX, }, { .procname = "msg_next_id", .data = &init_ipc_ns.ids[IPC_MSG_IDS].next_id, .maxlen = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id), - .mode = 0644, - .proc_handler = proc_ipc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &int_max, + .mode = 0444, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "shm_next_id", .data = &init_ipc_ns.ids[IPC_SHM_IDS].next_id, .maxlen = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id), - .mode = 0644, - .proc_handler = proc_ipc_dointvec_minmax, - .extra1 = &zero, - .extra2 = &int_max, + .mode = 0444, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, #endif - {} }; -static struct ctl_table ipc_root_table[] = { +static struct ctl_table_set *set_lookup(struct ctl_table_root *root) +{ + return ¤t->nsproxy->ipc_ns->ipc_set; +} + +static int set_is_seen(struct ctl_table_set *set) +{ + return ¤t->nsproxy->ipc_ns->ipc_set == set; +} + +static void ipc_set_ownership(struct ctl_table_header *head, + kuid_t *uid, kgid_t *gid) +{ + struct ipc_namespace *ns = + container_of(head->set, struct ipc_namespace, ipc_set); + + kuid_t ns_root_uid = make_kuid(ns->user_ns, 0); + kgid_t ns_root_gid = make_kgid(ns->user_ns, 0); + + *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID; + *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID; +} + +static int ipc_permissions(struct ctl_table_header *head, const struct ctl_table *table) +{ + int mode = table->mode; + +#ifdef CONFIG_CHECKPOINT_RESTORE + struct ipc_namespace *ns = + container_of(head->set, struct ipc_namespace, ipc_set); + + if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) || + (table->data == &ns->ids[IPC_MSG_IDS].next_id) || + (table->data == &ns->ids[IPC_SHM_IDS].next_id)) && + checkpoint_restore_ns_capable(ns->user_ns)) + mode = 0666; + else +#endif { - .procname = "kernel", - .mode = 0555, - .child = ipc_kern_table, - }, - {} + kuid_t ns_root_uid; + kgid_t ns_root_gid; + + ipc_set_ownership(head, &ns_root_uid, &ns_root_gid); + + if (uid_eq(current_euid(), ns_root_uid)) + mode >>= 6; + + else if (in_egroup_p(ns_root_gid)) + mode >>= 3; + } + + mode &= 7; + + return (mode << 6) | (mode << 3) | mode; +} + +static struct ctl_table_root set_root = { + .lookup = set_lookup, + .permissions = ipc_permissions, + .set_ownership = ipc_set_ownership, }; +bool setup_ipc_sysctls(struct ipc_namespace *ns) +{ + struct ctl_table *tbl; + + setup_sysctl_set(&ns->ipc_set, &set_root, set_is_seen); + + tbl = kmemdup(ipc_sysctls, sizeof(ipc_sysctls), GFP_KERNEL); + if (tbl) { + int i; + + for (i = 0; i < ARRAY_SIZE(ipc_sysctls); i++) { + if (tbl[i].data == &init_ipc_ns.shm_ctlmax) + tbl[i].data = &ns->shm_ctlmax; + + else if (tbl[i].data == &init_ipc_ns.shm_ctlall) + tbl[i].data = &ns->shm_ctlall; + + else if (tbl[i].data == &init_ipc_ns.shm_ctlmni) + tbl[i].data = &ns->shm_ctlmni; + + else if (tbl[i].data == &init_ipc_ns.shm_rmid_forced) + tbl[i].data = &ns->shm_rmid_forced; + + else if (tbl[i].data == &init_ipc_ns.msg_ctlmax) + tbl[i].data = &ns->msg_ctlmax; + + else if (tbl[i].data == &init_ipc_ns.msg_ctlmni) + tbl[i].data = &ns->msg_ctlmni; + + else if (tbl[i].data == &init_ipc_ns.msg_ctlmnb) + tbl[i].data = &ns->msg_ctlmnb; + + else if (tbl[i].data == &init_ipc_ns.sem_ctls) + tbl[i].data = &ns->sem_ctls; +#ifdef CONFIG_CHECKPOINT_RESTORE + else if (tbl[i].data == &init_ipc_ns.ids[IPC_SEM_IDS].next_id) + tbl[i].data = &ns->ids[IPC_SEM_IDS].next_id; + + else if (tbl[i].data == 
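The reworked table clamps tunables like msgmni and shmmni to [0, ipc_mni] via proc_dointvec_minmax instead of the old unchecked proc_dointvec. From userspace these remain ordinary files under /proc/sys/kernel; a small sketch that reads two of them:

```c
#include <stdio.h>

static long read_sysctl(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("msgmni = %ld\n", read_sysctl("/proc/sys/kernel/msgmni"));
	printf("shmmni = %ld\n", read_sysctl("/proc/sys/kernel/shmmni"));
	return 0;
}
```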
&init_ipc_ns.ids[IPC_MSG_IDS].next_id) + tbl[i].data = &ns->ids[IPC_MSG_IDS].next_id; + + else if (tbl[i].data == &init_ipc_ns.ids[IPC_SHM_IDS].next_id) + tbl[i].data = &ns->ids[IPC_SHM_IDS].next_id; +#endif + else + tbl[i].data = NULL; + } + + ns->ipc_sysctls = __register_sysctl_table(&ns->ipc_set, "kernel", tbl, + ARRAY_SIZE(ipc_sysctls)); + } + if (!ns->ipc_sysctls) { + kfree(tbl); + retire_sysctl_set(&ns->ipc_set); + return false; + } + + return true; +} + +void retire_ipc_sysctls(struct ipc_namespace *ns) +{ + const struct ctl_table *tbl; + + tbl = ns->ipc_sysctls->ctl_table_arg; + unregister_sysctl_table(ns->ipc_sysctls); + retire_sysctl_set(&ns->ipc_set); + kfree(tbl); +} + static int __init ipc_sysctl_init(void) { - register_sysctl_table(ipc_root_table); + if (!setup_ipc_sysctls(&init_ipc_ns)) { + pr_warn("ipc sysctl registration failed\n"); + return -ENOMEM; + } return 0; } -__initcall(ipc_sysctl_init); +device_initcall(ipc_sysctl_init); + +static int __init ipc_mni_extend(char *str) +{ + ipc_mni = IPCMNI_EXTEND; + ipc_mni_shift = IPCMNI_EXTEND_SHIFT; + ipc_min_cycle = IPCMNI_EXTEND_MIN_CYCLE; + pr_info("IPCMNI extended to %d.\n", ipc_mni); + return 0; +} +early_param("ipcmni_extend", ipc_mni_extend); diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c deleted file mode 100644 index b9b31a4f77e1..000000000000 --- a/ipc/ipcns_notifier.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * linux/ipc/ipcns_notifier.c - * Copyright (C) 2007 BULL SA. Nadia Derbey - * - * Notification mechanism for ipc namespaces: - * The callback routine registered in the memory chain invokes the ipcns - * notifier chain with the IPCNS_MEMCHANGED event. - * Each callback routine registered in the ipcns namespace recomputes msgmni - * for the owning namespace. - */ - -#include <linux/msg.h> -#include <linux/rcupdate.h> -#include <linux/notifier.h> -#include <linux/nsproxy.h> -#include <linux/ipc_namespace.h> - -#include "util.h" - - - -static BLOCKING_NOTIFIER_HEAD(ipcns_chain); - - -static int ipcns_callback(struct notifier_block *self, - unsigned long action, void *arg) -{ - struct ipc_namespace *ns; - - switch (action) { - case IPCNS_MEMCHANGED: /* amount of lowmem has changed */ - case IPCNS_CREATED: - case IPCNS_REMOVED: - /* - * It's time to recompute msgmni - */ - ns = container_of(self, struct ipc_namespace, ipcns_nb); - /* - * No need to get a reference on the ns: the 1st job of - * free_ipc_ns() is to unregister the callback routine. - * blocking_notifier_chain_unregister takes the wr lock to do - * it. - * When this callback routine is called the rd lock is held by - * blocking_notifier_call_chain. - * So the ipc ns cannot be freed while we are here. 
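setup_ipc_sysctls() kmemdup()s the template table and repoints each .data field at the new namespace, so every IPC namespace sees its own copy of these tunables under the same /proc path. A hedged demo (requires CAP_SYS_ADMIN, and the new namespace's defaults may happen to match the parent's):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static long msgmni(void)
{
	long v = -1;
	FILE *f = fopen("/proc/sys/kernel/msgmni", "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	printf("parent ns msgmni = %ld\n", msgmni());
	if (unshare(CLONE_NEWIPC) != 0) {	/* needs privilege */
		perror("unshare(CLONE_NEWIPC)");
		return 1;
	}
	/* Same path, but the lookup now resolves to the new ns's table. */
	printf("new ns msgmni    = %ld\n", msgmni());
	return 0;
}
```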
- */ - recompute_msgmni(ns); - break; - default: - break; - } - - return NOTIFY_OK; -} - -int register_ipcns_notifier(struct ipc_namespace *ns) -{ - int rc; - - memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); - ns->ipcns_nb.notifier_call = ipcns_callback; - ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; - rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); - if (!rc) - ns->auto_msgmni = 1; - return rc; -} - -int cond_register_ipcns_notifier(struct ipc_namespace *ns) -{ - int rc; - - memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); - ns->ipcns_nb.notifier_call = ipcns_callback; - ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; - rc = blocking_notifier_chain_cond_register(&ipcns_chain, - &ns->ipcns_nb); - if (!rc) - ns->auto_msgmni = 1; - return rc; -} - -void unregister_ipcns_notifier(struct ipc_namespace *ns) -{ - blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb); - ns->auto_msgmni = 0; -} - -int ipcns_notify(unsigned long val) -{ - return blocking_notifier_call_chain(&ipcns_chain, val, NULL); -} diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c index 383d638340b8..0dd12e1c9f53 100644 --- a/ipc/mq_sysctl.c +++ b/ipc/mq_sysctl.c @@ -1,43 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007 IBM Corporation * * Author: Cedric Le Goater <clg@fr.ibm.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2 of the - * License. */ #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <linux/sysctl.h> -#ifdef CONFIG_PROC_SYSCTL -static void *get_mq(ctl_table *table) -{ - char *which = table->data; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; - return which; -} - -static int proc_mq_dointvec_minmax(ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - struct ctl_table mq_table; - memcpy(&mq_table, table, sizeof(mq_table)); - mq_table.data = get_mq(table); - - return proc_dointvec_minmax(&mq_table, write, buffer, - lenp, ppos); -} -#else -#define proc_mq_dointvec_minmax NULL -#endif - -static int msg_queues_limit_min = MIN_QUEUESMAX; -static int msg_queues_limit_max = HARD_QUEUESMAX; +#include <linux/stat.h> +#include <linux/capability.h> +#include <linux/slab.h> +#include <linux/cred.h> static int msg_max_limit_min = MIN_MSGMAX; static int msg_max_limit_max = HARD_MSGMAX; @@ -45,22 +20,20 @@ static int msg_max_limit_max = HARD_MSGMAX; static int msg_maxsize_limit_min = MIN_MSGSIZEMAX; static int msg_maxsize_limit_max = HARD_MSGSIZEMAX; -static ctl_table mq_sysctls[] = { +static const struct ctl_table mq_sysctls[] = { { .procname = "queues_max", .data = &init_ipc_ns.mq_queues_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_mq_dointvec_minmax, - .extra1 = &msg_queues_limit_min, - .extra2 = &msg_queues_limit_max, + .proc_handler = proc_dointvec, }, { .procname = "msg_max", .data = &init_ipc_ns.mq_msg_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_mq_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &msg_max_limit_min, .extra2 = &msg_max_limit_max, }, @@ -69,7 +42,7 @@ static ctl_table mq_sysctls[] = { .data = &init_ipc_ns.mq_msgsize_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_mq_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &msg_maxsize_limit_min, .extra2 = &msg_maxsize_limit_max, }, @@ -78,7 +51,7 @@ static 
ctl_table mq_sysctls[] = { .data = &init_ipc_ns.mq_msg_default, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_mq_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &msg_max_limit_min, .extra2 = &msg_max_limit_max, }, @@ -87,32 +60,108 @@ static ctl_table mq_sysctls[] = { .data = &init_ipc_ns.mq_msgsize_default, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_mq_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &msg_maxsize_limit_min, .extra2 = &msg_maxsize_limit_max, }, - {} }; -static ctl_table mq_sysctl_dir[] = { - { - .procname = "mqueue", - .mode = 0555, - .child = mq_sysctls, - }, - {} -}; +static struct ctl_table_set *set_lookup(struct ctl_table_root *root) +{ + return ¤t->nsproxy->ipc_ns->mq_set; +} -static ctl_table mq_sysctl_root[] = { - { - .procname = "fs", - .mode = 0555, - .child = mq_sysctl_dir, - }, - {} +static int set_is_seen(struct ctl_table_set *set) +{ + return ¤t->nsproxy->ipc_ns->mq_set == set; +} + +static void mq_set_ownership(struct ctl_table_header *head, + kuid_t *uid, kgid_t *gid) +{ + struct ipc_namespace *ns = + container_of(head->set, struct ipc_namespace, mq_set); + + kuid_t ns_root_uid = make_kuid(ns->user_ns, 0); + kgid_t ns_root_gid = make_kgid(ns->user_ns, 0); + + *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID; + *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID; +} + +static int mq_permissions(struct ctl_table_header *head, const struct ctl_table *table) +{ + int mode = table->mode; + kuid_t ns_root_uid; + kgid_t ns_root_gid; + + mq_set_ownership(head, &ns_root_uid, &ns_root_gid); + + if (uid_eq(current_euid(), ns_root_uid)) + mode >>= 6; + + else if (in_egroup_p(ns_root_gid)) + mode >>= 3; + + mode &= 7; + + return (mode << 6) | (mode << 3) | mode; +} + +static struct ctl_table_root set_root = { + .lookup = set_lookup, + .permissions = mq_permissions, + .set_ownership = mq_set_ownership, }; -struct ctl_table_header *mq_register_sysctl_table(void) +bool setup_mq_sysctls(struct ipc_namespace *ns) +{ + struct ctl_table *tbl; + + setup_sysctl_set(&ns->mq_set, &set_root, set_is_seen); + + tbl = kmemdup(mq_sysctls, sizeof(mq_sysctls), GFP_KERNEL); + if (tbl) { + int i; + + for (i = 0; i < ARRAY_SIZE(mq_sysctls); i++) { + if (tbl[i].data == &init_ipc_ns.mq_queues_max) + tbl[i].data = &ns->mq_queues_max; + + else if (tbl[i].data == &init_ipc_ns.mq_msg_max) + tbl[i].data = &ns->mq_msg_max; + + else if (tbl[i].data == &init_ipc_ns.mq_msgsize_max) + tbl[i].data = &ns->mq_msgsize_max; + + else if (tbl[i].data == &init_ipc_ns.mq_msg_default) + tbl[i].data = &ns->mq_msg_default; + + else if (tbl[i].data == &init_ipc_ns.mq_msgsize_default) + tbl[i].data = &ns->mq_msgsize_default; + else + tbl[i].data = NULL; + } + + ns->mq_sysctls = __register_sysctl_table(&ns->mq_set, + "fs/mqueue", tbl, + ARRAY_SIZE(mq_sysctls)); + } + if (!ns->mq_sysctls) { + kfree(tbl); + retire_sysctl_set(&ns->mq_set); + return false; + } + + return true; +} + +void retire_mq_sysctls(struct ipc_namespace *ns) { - return register_sysctl_table(mq_sysctl_root); + const struct ctl_table *tbl; + + tbl = ns->mq_sysctls->ctl_table_arg; + unregister_sysctl_table(ns->mq_sysctls); + retire_sysctl_set(&ns->mq_set); + kfree(tbl); } diff --git a/ipc/mqueue.c b/ipc/mqueue.c index ae1996d3c539..c4f6d65596cf 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -6,7 +6,7 @@ * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: - * Manfred Spraul (manfred@colorfullife.com) + * Manfred 
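The mq_permissions() handler above picks the permission class that matches the namespace-root uid/gid, then replicates those three bits across owner, group, and other so later generic checks cannot widen access. A tiny userspace model of that arithmetic:

```c
#include <stdio.h>

/* Model of the mode calculation in mq_permissions()/ipc_permissions(). */
static int effective_mode(int mode, int is_owner, int in_group)
{
	if (is_owner)
		mode >>= 6;	/* use the owner class */
	else if (in_group)
		mode >>= 3;	/* use the group class */
	mode &= 7;
	/* replicate the chosen class across all three positions */
	return (mode << 6) | (mode << 3) | mode;
}

int main(void)
{
	printf("0644 as owner -> %03o\n", effective_mode(0644, 1, 0));
	printf("0644 as other -> %03o\n", effective_mode(0644, 0, 0));
	return 0;
}
```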
Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * @@ -18,6 +18,7 @@ #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> @@ -35,10 +36,18 @@ #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/signal.h> +#include <linux/sched/user.h> #include <net/sock.h> #include "util.h" +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; /* Set if newly created ipc namespace */ +}; + #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 @@ -47,8 +56,7 @@ #define RECV 1 #define STATE_NONE 0 -#define STATE_PENDING 1 -#define STATE_READY 2 +#define STATE_READY 1 struct posix_msg_tree_node { struct rb_node rb_node; @@ -56,6 +64,66 @@ struct posix_msg_tree_node { int priority; }; +/* + * Locking: + * + * Accesses to a message queue are synchronized by acquiring info->lock. + * + * There are two notable exceptions: + * - The actual wakeup of a sleeping task is performed using the wake_q + * framework. info->lock is already released when wake_up_q is called. + * - The exit codepaths after sleeping check ext_wait_queue->state without + * any locks. If it is STATE_READY, then the syscall is completed without + * acquiring info->lock. + * + * MQ_BARRIER: + * To achieve proper release/acquire memory barrier pairing, the state is set to + * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed + * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used. + * + * This prevents the following races: + * + * 1) With the simple wake_q_add(), the task could be gone already before + * the increase of the reference happens + * Thread A + * Thread B + * WRITE_ONCE(wait.state, STATE_NONE); + * schedule_hrtimeout() + * wake_q_add(A) + * if (cmpxchg()) // success + * ->state = STATE_READY (reordered) + * <timeout returns> + * if (wait.state == STATE_READY) return; + * sysret to user space + * sys_exit() + * get_task_struct() // UaF + * + * Solution: Use wake_q_add_safe() and perform the get_task_struct() before + * the smp_store_release() that does ->state = STATE_READY. + * + * 2) Without proper _release/_acquire barriers, the woken up task + * could read stale data + * + * Thread A + * Thread B + * do_mq_timedreceive + * WRITE_ONCE(wait.state, STATE_NONE); + * schedule_hrtimeout() + * state = STATE_READY; + * <timeout returns> + * if (wait.state == STATE_READY) return; + * msg_ptr = wait.msg; // Access to stale data! + * receiver->msg = message; (reordered) + * + * Solution: use _release and _acquire barriers. + * + * 3) There is intentionally no barrier when setting current->state + * to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the + * release memory barrier, and the wakeup is triggered when holding + * info->lock, i.e. spin_lock(&info->lock) provided a pairing + * acquire memory barrier. 
+ */ + struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; @@ -69,13 +137,15 @@ struct mqueue_inode_info { wait_queue_head_t wait_q; struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; - struct pid* notify_owner; + struct pid *notify_owner; + u32 notify_self_exec_id; struct user_namespace *notify_user_ns; - struct user_struct *user; /* user who created, for accounting */ + struct ucounts *ucounts; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -85,15 +155,15 @@ struct mqueue_inode_info { unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; +static struct file_system_type mqueue_fs_type; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct super_operations mqueue_super_ops; +static const struct fs_context_operations mqueue_fs_context_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; -static struct ctl_table_header * mq_sysctl_table; - static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); @@ -122,6 +192,7 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; + bool rightmost = true; p = &info->msg_tree.rb_node; while (*p) { @@ -130,9 +201,10 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (likely(leaf->priority == msg->m_type)) goto insert_msg; - else if (msg->m_type < leaf->priority) + else if (msg->m_type < leaf->priority) { p = &(*p)->rb_left; - else + rightmost = false; + } else p = &(*p)->rb_right; } if (info->node_cache) { @@ -143,9 +215,12 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); - info->qsize += sizeof(*leaf); } leaf->priority = msg->m_type; + + if (rightmost) + info->msg_tree_rightmost = &leaf->rb_node; + rb_link_node(&leaf->rb_node, parent, p); rb_insert_color(&leaf->rb_node, &info->msg_tree); insert_msg: @@ -155,23 +230,34 @@ insert_msg: return 0; } +static inline void msg_tree_erase(struct posix_msg_tree_node *leaf, + struct mqueue_inode_info *info) +{ + struct rb_node *node = &leaf->rb_node; + + if (info->msg_tree_rightmost == node) + info->msg_tree_rightmost = rb_prev(node); + + rb_erase(node, &info->msg_tree); + if (info->node_cache) + kfree(leaf); + else + info->node_cache = leaf; +} + static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { - struct rb_node **p, *parent = NULL; + struct rb_node *parent = NULL; struct posix_msg_tree_node *leaf; struct msg_msg *msg; try_again: - p = &info->msg_tree.rb_node; - while (*p) { - parent = *p; - /* - * During insert, low priorities go to the left and high to the - * right. On receive, we want the highest priorities first, so - * walk all the way to the right. - */ - p = &(*p)->rb_right; - } + /* + * During insert, low priorities go to the left and high to the + * right. On receive, we want the highest priorities first, so + * walk all the way to the right. 
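The release/acquire pairing the MQ_BARRIER comment describes can be modeled in userspace with C11 atomics: a sketch of the STATE_NONE to STATE_READY handoff, where the waker publishes a payload with a release store and the sleeper observes it with an acquire load (the kernel sleeps instead of spinning, and additionally needs wake_q_add_safe() for the lifetime issue):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { STATE_NONE, STATE_READY };

static _Atomic int state = STATE_NONE;
static const char *msg;			/* the payload being handed off */

static void *waker(void *arg)
{
	(void)arg;
	msg = "hello";			/* write payload first */
	atomic_store_explicit(&state, STATE_READY,
			      memory_order_release);	/* then publish */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	while (atomic_load_explicit(&state,
				    memory_order_acquire) != STATE_READY)
		;			/* spin; acquire pairs with release */
	printf("%s\n", msg);		/* payload is guaranteed visible */
	pthread_join(t, NULL);
	return 0;
}
```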
+ */ + parent = info->msg_tree_rightmost; if (!parent) { if (info->attr.mq_curmsgs) { pr_warn_once("Inconsistency in POSIX message queue, " @@ -186,26 +272,14 @@ try_again: pr_warn_once("Inconsistency in POSIX message queue, " "empty leaf node but we haven't implemented " "lazy leaf delete!\n"); - rb_erase(&leaf->rb_node, &info->msg_tree); - if (info->node_cache) { - info->qsize -= sizeof(*leaf); - kfree(leaf); - } else { - info->node_cache = leaf; - } + msg_tree_erase(leaf, info); goto try_again; } else { msg = list_first_entry(&leaf->msg_list, struct msg_msg, m_list); list_del(&msg->m_list); if (list_empty(&leaf->msg_list)) { - rb_erase(&leaf->rb_node, &info->msg_tree); - if (info->node_cache) { - info->qsize -= sizeof(*leaf); - kfree(leaf); - } else { - info->node_cache = leaf; - } + msg_tree_erase(leaf, info); } } info->attr.mq_curmsgs--; @@ -217,7 +291,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { - struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; @@ -229,7 +302,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; + simple_inode_init_ts(inode); if (S_ISREG(mode)) { struct mqueue_inode_info *info; @@ -246,8 +319,9 @@ static struct inode *mqueue_get_inode(struct super_block *sb, info->notify_owner = NULL; info->notify_user_ns = NULL; info->qsize = 0; - info->user = NULL; /* set when all is ok */ + info->ucounts = NULL; /* set when all is ok */ info->msg_tree = RB_ROOT; + info->msg_tree_rightmost = NULL; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, @@ -271,26 +345,47 @@ static struct inode *mqueue_get_inode(struct super_block *sb, * that means the min(mq_maxmsg, max_priorities) * struct * posix_msg_tree_node. 
*/ + + ret = -EINVAL; + if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0) + goto out_inode; + if (capable(CAP_SYS_RESOURCE)) { + if (info->attr.mq_maxmsg > HARD_MSGMAX || + info->attr.mq_msgsize > HARD_MSGSIZEMAX) + goto out_inode; + } else { + if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max || + info->attr.mq_msgsize > ipc_ns->mq_msgsize_max) + goto out_inode; + } + ret = -EOVERFLOW; + /* check for overflow */ + if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg) + goto out_inode; mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); - - mq_bytes = mq_treesize + (info->attr.mq_maxmsg * - info->attr.mq_msgsize); - - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { - spin_unlock(&mq_lock); - /* mqueue_evict_inode() releases info->messages */ - ret = -EMFILE; + mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize; + if (mq_bytes + mq_treesize < mq_bytes) goto out_inode; + mq_bytes += mq_treesize; + info->ucounts = get_ucounts(current_ucounts()); + if (info->ucounts) { + long msgqueue; + + spin_lock(&mq_lock); + msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); + if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) { + dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); + spin_unlock(&mq_lock); + put_ucounts(info->ucounts); + info->ucounts = NULL; + /* mqueue_evict_inode() releases info->messages */ + ret = -EMFILE; + goto out_inode; + } + spin_unlock(&mq_lock); } - u->mq_bytes += mq_bytes; - spin_unlock(&mq_lock); - - /* all is ok */ - info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ @@ -306,15 +401,17 @@ err: return ERR_PTR(ret); } -static int mqueue_fill_super(struct super_block *sb, void *data, int silent) +static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; - struct ipc_namespace *ns = data; + struct ipc_namespace *ns = sb->s_fs_info; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; + sb->s_d_flags = DCACHE_DONTCACHE; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) @@ -326,26 +423,74 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent) return 0; } -static struct dentry *mqueue_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) +static int mqueue_get_tree(struct fs_context *fc) { - if (!(flags & MS_KERNMOUNT)) { - struct ipc_namespace *ns = current->nsproxy->ipc_ns; - /* Don't allow mounting unless the caller has CAP_SYS_ADMIN - * over the ipc namespace. - */ - if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); + struct mqueue_fs_context *ctx = fc->fs_private; - data = ns; + /* + * With a newly created ipc namespace, we don't need to do a search + * for an ipc namespace match, but we still need to set s_fs_info. 
+ */ + if (ctx->newns) { + fc->s_fs_info = ctx->ipc_ns; + return get_tree_nodev(fc, mqueue_fill_super); } - return mount_ns(fs_type, flags, data, mqueue_fill_super); + return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns); +} + +static void mqueue_fs_context_free(struct fs_context *fc) +{ + struct mqueue_fs_context *ctx = fc->fs_private; + + put_ipc_ns(ctx->ipc_ns); + kfree(ctx); +} + +static int mqueue_init_fs_context(struct fs_context *fc) +{ + struct mqueue_fs_context *ctx; + + ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns); + fc->fs_private = ctx; + fc->ops = &mqueue_fs_context_ops; + return 0; +} + +/* + * mq_init_ns() is currently the only caller of mq_create_mount(). + * So the ns parameter is always a newly created ipc namespace. + */ +static struct vfsmount *mq_create_mount(struct ipc_namespace *ns) +{ + struct mqueue_fs_context *ctx; + struct fs_context *fc; + struct vfsmount *mnt; + + fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT); + if (IS_ERR(fc)) + return ERR_CAST(fc); + + ctx = fc->fs_private; + ctx->newns = true; + put_ipc_ns(ctx->ipc_ns); + ctx->ipc_ns = get_ipc_ns(ns); + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns); + + mnt = fc_mount_longterm(fc); + put_fs_context(fc); + return mnt; } static void init_once(void *foo) { - struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; + struct mqueue_inode_info *p = foo; inode_init_once(&p->vfs_inode); } @@ -354,30 +499,23 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; - ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); + ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } -static void mqueue_i_callback(struct rcu_head *head) +static void mqueue_free_inode(struct inode *inode) { - struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } -static void mqueue_destroy_inode(struct inode *inode) -{ - call_rcu(&inode->i_rcu, mqueue_i_callback); -} - static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; - struct user_struct *user; - unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; - struct msg_msg *msg; + struct msg_msg *msg, *nmsg; + LIST_HEAD(tmp_msg); clear_inode(inode); @@ -388,22 +526,28 @@ static void mqueue_evict_inode(struct inode *inode) info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) - free_msg(msg); + list_add_tail(&msg->m_list, &tmp_msg); kfree(info->node_cache); spin_unlock(&info->lock); - /* Total amount of bytes accounted for the mqueue */ - mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + - min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * - sizeof(struct posix_msg_tree_node); + list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { + list_del(&msg->m_list); + free_msg(msg); + } + + if (info->ucounts) { + unsigned long mq_bytes, mq_treesize; - mq_bytes = mq_treesize + (info->attr.mq_maxmsg * - info->attr.mq_msgsize); + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - user = 
info->user; - if (user) { spin_lock(&mq_lock); - user->mq_bytes -= mq_bytes; + dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns @@ -413,17 +557,18 @@ static void mqueue_evict_inode(struct inode *inode) if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); - free_uid(user); + put_ucounts(info->ucounts); + info->ucounts = NULL; } if (ipc_ns) put_ipc_ns(ipc_ns); } -static int mqueue_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) +static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg) { + struct inode *dir = dentry->d_parent->d_inode; struct inode *inode; - struct mq_attr *attr = dentry->d_fsdata; + struct mq_attr *attr = arg; int error; struct ipc_namespace *ipc_ns; @@ -433,9 +578,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry, error = -EACCES; goto out_unlock; } - if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || - (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && - !capable(CAP_SYS_RESOURCE))) { + + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && + !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } @@ -452,10 +597,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry, put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; - dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; + simple_inode_init_ts(dir); - d_instantiate(dentry, inode); - dget(dentry); + d_make_persistent(dentry, inode); return 0; out_unlock: spin_unlock(&mq_lock); @@ -464,15 +608,16 @@ out_unlock: return error; } -static int mqueue_unlink(struct inode *dir, struct dentry *dentry) +static int mqueue_create(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode, bool excl) { - struct inode *inode = dentry->d_inode; + return mqueue_create_attr(dentry, mode, NULL); +} - dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; +static int mqueue_unlink(struct inode *dir, struct dentry *dentry) +{ dir->i_size -= DIRENT_SIZE; - drop_nlink(inode); - dput(dentry); - return 0; + return simple_unlink(dir, dentry); } /* @@ -485,7 +630,8 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { - struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); + struct inode *inode = file_inode(filp); + struct mqueue_inode_info *info = MQUEUE_I(inode); char buffer[FILENT_SIZE]; ssize_t ret; @@ -506,7 +652,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, if (ret <= 0) return ret; - file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); return ret; } @@ -522,19 +668,19 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id) return 0; } -static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) +static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); - int retval = 0; + __poll_t retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) - retval = POLLIN | POLLRDNORM; + retval = EPOLLIN | EPOLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) - retval |= POLLOUT | POLLWRNORM; + retval |= EPOLLOUT | EPOLLWRNORM; spin_unlock(&info->lock); return retval; @@ -546,10 +692,8 @@ 
static void wq_add(struct mqueue_inode_info *info, int sr, { struct ext_wait_queue *walk; - ewp->task = current; - list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { - if (walk->task->static_prio <= current->static_prio) { + if (walk->task->prio <= current->prio) { list_add_tail(&ewp->list, &walk->list); return; } @@ -564,6 +708,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) + __releases(&info->lock) { int retval; signed long time; @@ -571,21 +716,23 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, wq_add(info, sr, ewp); for (;;) { - set_current_state(TASK_INTERRUPTIBLE); + /* memory barrier not required, we hold info->lock */ + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); - while (ewp->state == STATE_PENDING) - cpu_relax(); - - if (ewp->state == STATE_READY) { + if (READ_ONCE(ewp->state) == STATE_READY) { + /* see MQ_BARRIER for purpose/pairing */ + smp_acquire__after_ctrl_dep(); retval = 0; goto out; } spin_lock(&info->lock); - if (ewp->state == STATE_READY) { + + /* we hold info->lock, so no memory barrier required */ + if (READ_ONCE(ewp->state) == STATE_READY) { retval = 0; goto out_unlock; } @@ -622,7 +769,7 @@ static struct ext_wait_queue *wq_get_first_waiter( static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -637,27 +784,44 @@ static void __do_notify(struct mqueue_inode_info *info) * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { - struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; - case SIGEV_SIGNAL: - /* sends signal */ + case SIGEV_SIGNAL: { + struct kernel_siginfo sig_i; + struct task_struct *task; + + /* do_mq_notify() accepts sigev_signo == 0, why?? */ + if (!info->notify.sigev_signo) + break; + clear_siginfo(&sig_i); sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); + /* map current pid/uid into info->owner's namespaces */ sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); - sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, + current_uid()); + /* + * We can't use kill_pid_info(), this signal should + * bypass check_kill_permission(). It is from kernel + * but si_fromuser() can't know this. + * We do check the self_exec_id, to avoid sending + * signals to programs that don't expect them. 
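+	 * A sketch of the race being closed (hypothetical timeline):
+	 * the owner registers with mq_notify() and then execs an
+	 * unrelated binary.  exec bumps self_exec_id, the saved
+	 * notify_self_exec_id no longer matches, and the signal is
+	 * quietly dropped instead of landing in the new program.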
+ */ + task = pid_task(info->notify_owner, PIDTYPE_TGID); + if (task && task->self_exec_id == + info->notify_self_exec_id) { + do_send_sig_info(info->notify.sigev_signo, + &sig_i, task, PIDTYPE_TGID); + } rcu_read_unlock(); - - kill_pid_info(info->notify.sigev_signo, - &sig_i, info->notify_owner); break; + } case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); @@ -672,15 +836,13 @@ static void __do_notify(struct mqueue_inode_info *info) wake_up(&info->wait_q); } -static int prepare_timeout(const struct timespec __user *u_abs_timeout, - ktime_t *expires, struct timespec *ts) +static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout, + struct timespec64 *ts) { - if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) + if (get_timespec64(ts, u_abs_timeout)) return -EFAULT; - if (!timespec_valid(ts)) + if (!timespec64_valid(ts)) return -EINVAL; - - *expires = timespec_to_ktime(*ts); return 0; } @@ -697,163 +859,84 @@ static void remove_notification(struct mqueue_inode_info *info) info->notify_user_ns = NULL; } -static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) +static int prepare_open(struct dentry *dentry, int oflag, int ro, + umode_t mode, struct filename *name, + struct mq_attr *attr) { - int mq_treesize; - unsigned long total_size; + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; + int acc; - if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) - return -EINVAL; - if (capable(CAP_SYS_RESOURCE)) { - if (attr->mq_maxmsg > HARD_MSGMAX || - attr->mq_msgsize > HARD_MSGSIZEMAX) - return -EINVAL; - } else { - if (attr->mq_maxmsg > ipc_ns->mq_msg_max || - attr->mq_msgsize > ipc_ns->mq_msgsize_max) - return -EINVAL; + if (d_really_is_negative(dentry)) { + if (!(oflag & O_CREAT)) + return -ENOENT; + if (ro) + return ro; + audit_inode_parent_hidden(name, dentry->d_parent); + return vfs_mkobj(dentry, mode & ~current_umask(), + mqueue_create_attr, attr); } - /* check for overflow */ - if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) - return -EOVERFLOW; - mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + - min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * - sizeof(struct posix_msg_tree_node); - total_size = attr->mq_maxmsg * attr->mq_msgsize; - if (total_size + mq_treesize < total_size) - return -EOVERFLOW; - return 0; + /* it already existed */ + audit_inode(name, dentry, 0); + if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) + return -EEXIST; + if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) + return -EINVAL; + acc = oflag2acc[oflag & O_ACCMODE]; + return inode_permission(&nop_mnt_idmap, d_inode(dentry), acc); } -/* - * Invoked when creating a new queue via sys_mq_open - */ -static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, - struct path *path, int oflag, umode_t mode, - struct mq_attr *attr) +static struct file *mqueue_file_open(struct filename *name, + struct vfsmount *mnt, int oflag, int ro, + umode_t mode, struct mq_attr *attr) { - const struct cred *cred = current_cred(); + struct dentry *dentry; + struct file *file; int ret; - if (attr) { - ret = mq_attr_ok(ipc_ns, attr); - if (ret) - return ERR_PTR(ret); - /* store for use during create */ - path->dentry->d_fsdata = attr; - } else { - struct mq_attr def_attr; - - def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, - ipc_ns->mq_msg_default); - def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, - ipc_ns->mq_msgsize_default); - ret = 
mq_attr_ok(ipc_ns, &def_attr); - if (ret) - return ERR_PTR(ret); + dentry = start_creating_noperm(mnt->mnt_root, &QSTR(name->name)); + if (IS_ERR(dentry)) + return ERR_CAST(dentry); + + ret = prepare_open(dentry, oflag, ro, mode, name, attr); + file = ERR_PTR(ret); + if (!ret) { + const struct path path = { .mnt = mnt, .dentry = dentry }; + file = dentry_open(&path, oflag, current_cred()); } - mode &= ~current_umask(); - ret = vfs_create(dir, path->dentry, mode, true); - path->dentry->d_fsdata = NULL; - if (ret) - return ERR_PTR(ret); - return dentry_open(path, oflag, cred); + end_creating(dentry); + return file; } -/* Opens existing queue */ -static struct file *do_open(struct path *path, int oflag) +static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, + struct mq_attr *attr) { - static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; - int acc; - if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) - return ERR_PTR(-EINVAL); - acc = oflag2acc[oflag & O_ACCMODE]; - if (inode_permission(path->dentry->d_inode, acc)) - return ERR_PTR(-EACCES); - return dentry_open(path, oflag, current_cred()); + struct filename *name __free(putname) = NULL; + struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt; + int fd, ro; + + audit_mq_open(oflag, mode, attr); + + name = getname(u_name); + if (IS_ERR(name)) + return PTR_ERR(name); + + ro = mnt_want_write(mnt); /* we'll drop it in any case */ + fd = FD_ADD(O_CLOEXEC, mqueue_file_open(name, mnt, oflag, ro, mode, attr)); + if (!ro) + mnt_drop_write(mnt); + return fd; } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { - struct path path; - struct file *filp; - struct filename *name; struct mq_attr attr; - int fd, error; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - struct vfsmount *mnt = ipc_ns->mq_mnt; - struct dentry *root = mnt->mnt_root; - int ro; - if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; - audit_mq_open(oflag, mode, u_attr ? &attr : NULL); - - if (IS_ERR(name = getname(u_name))) - return PTR_ERR(name); - - fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) - goto out_putname; - - ro = mnt_want_write(mnt); /* we'll drop it in any case */ - error = 0; - mutex_lock(&root->d_inode->i_mutex); - path.dentry = lookup_one_len(name->name, root, strlen(name->name)); - if (IS_ERR(path.dentry)) { - error = PTR_ERR(path.dentry); - goto out_putfd; - } - path.mnt = mntget(mnt); - - if (oflag & O_CREAT) { - if (path.dentry->d_inode) { /* entry already exists */ - audit_inode(name, path.dentry, 0); - if (oflag & O_EXCL) { - error = -EEXIST; - goto out; - } - filp = do_open(&path, oflag); - } else { - if (ro) { - error = ro; - goto out; - } - audit_inode_parent_hidden(name, root); - filp = do_create(ipc_ns, root->d_inode, - &path, oflag, mode, - u_attr ? &attr : NULL); - } - } else { - if (!path.dentry->d_inode) { - error = -ENOENT; - goto out; - } - audit_inode(name, path.dentry, 0); - filp = do_open(&path, oflag); - } - - if (!IS_ERR(filp)) - fd_install(fd, filp); - else - error = PTR_ERR(filp); -out: - path_put(&path); -out_putfd: - if (error) { - put_unused_fd(fd); - fd = error; - } - mutex_unlock(&root->d_inode->i_mutex); - if (!ro) - mnt_drop_write(mnt); -out_putname: - putname(name); - return fd; + return do_mq_open(u_name, oflag, mode, u_attr ? 
&attr : NULL); } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) @@ -861,7 +944,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) int err; struct filename *name; struct dentry *dentry; - struct inode *inode = NULL; + struct inode *inode; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; @@ -873,27 +956,20 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) err = mnt_want_write(mnt); if (err) goto out_name; - mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(name->name, mnt->mnt_root, - strlen(name->name)); + dentry = start_removing_noperm(mnt->mnt_root, &QSTR(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); - goto out_unlock; + goto out_drop_write; } - inode = dentry->d_inode; - if (!inode) { - err = -ENOENT; - } else { - ihold(inode); - err = vfs_unlink(dentry->d_parent->d_inode, dentry); - } - dput(dentry); + inode = d_inode(dentry); + ihold(inode); + err = vfs_unlink(&nop_mnt_idmap, d_inode(mnt->mnt_root), + dentry, NULL); + end_removing(dentry); + iput(inode); -out_unlock: - mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); - if (inode) - iput(inode); +out_drop_write: mnt_drop_write(mnt); out_name: putname(name); @@ -907,33 +983,49 @@ out_name: * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the - * receiver. - * The receiver accepts the message and returns without grabbing the queue - * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers - * are necessary. The same algorithm is used for sysv semaphores, see - * ipc/sem.c for more details. + * receiver. The receiver accepts the message and returns without grabbing the + * queue spinlock: + * + * - Set pointer to message. + * - Queue the receiver task for later wakeup (without the info->lock). + * - Update its state to STATE_READY. Now the receiver can continue. + * - Wake up the process after the lock is dropped. Should the process wake up + * before this wakeup (due to a timeout or a signal) it will either see + * STATE_READY and continue or acquire the lock to check the state again. * * The same algorithm is used for senders. */ +static inline void __pipelined_op(struct wake_q_head *wake_q, + struct mqueue_inode_info *info, + struct ext_wait_queue *this) +{ + struct task_struct *task; + + list_del(&this->list); + task = get_task_struct(this->task); + + /* see MQ_BARRIER for purpose/pairing */ + smp_store_release(&this->state, STATE_READY); + wake_q_add_safe(wake_q, task); +} + /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). */ -static inline void pipelined_send(struct mqueue_inode_info *info, +static inline void pipelined_send(struct wake_q_head *wake_q, + struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; - list_del(&receiver->list); - receiver->state = STATE_PENDING; - wake_up_process(receiver->task); - smp_wmb(); - receiver->state = STATE_READY; + __pipelined_op(wake_q, info, receiver); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). 
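 *
 * The handoff follows __pipelined_op() above (a summary of the code,
 * not an extra rule): unlink the waiter under info->lock, publish the
 * result with a release store of STATE_READY, and wake the task via
 * the wake_q only after info->lock has been dropped.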
*/ -static inline void pipelined_receive(struct mqueue_inode_info *info) +static inline void pipelined_receive(struct wake_q_head *wake_q, + struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); @@ -944,71 +1036,55 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) } if (msg_insert(sender->msg, info)) return; - list_del(&sender->list); - sender->state = STATE_PENDING; - wake_up_process(sender->task); - smp_wmb(); - sender->state = STATE_READY; + + __pipelined_op(wake_q, info, sender); } -SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, - size_t, msg_len, unsigned int, msg_prio, - const struct timespec __user *, u_abs_timeout) +static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, + size_t msg_len, unsigned int msg_prio, + struct timespec64 *ts) { - struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; - struct timespec ts; struct posix_msg_tree_node *new_leaf = NULL; int ret = 0; - - if (u_abs_timeout) { - int res = prepare_timeout(u_abs_timeout, &expires, &ts); - if (res) - return res; - timeout = &expires; - } + DEFINE_WAKE_Q(wake_q); if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; - audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); - - f = fdget(mqdes); - if (unlikely(!f.file)) { - ret = -EBADF; - goto out; + if (ts) { + expires = timespec64_to_ktime(*ts); + timeout = &expires; } - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; - } + audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts); + + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; + + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; info = MQUEUE_I(inode); - audit_inode(NULL, f.file->f_path.dentry, 0); + audit_file(fd_file(f)); - if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { - ret = -EBADF; - goto out_fput; - } + if (unlikely(!(fd_file(f)->f_mode & FMODE_WRITE))) + return -EBADF; - if (unlikely(msg_len > info->attr.mq_msgsize)) { - ret = -EMSGSIZE; - goto out_fput; - } + if (unlikely(msg_len > info->attr.mq_msgsize)) + return -EMSGSIZE; /* First try to allocate memory, before doing anything with * existing queues. 
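 *
 * (load_msg() copies the payload in from userspace and makes sleeping
 * allocations, so it must run before info->lock, a spinlock, is
 * taken; the speculative posix_msg_tree_node allocation further down
 * exists for the same reason.)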
*/ msg_ptr = load_msg(u_msg_ptr, msg_len); - if (IS_ERR(msg_ptr)) { - ret = PTR_ERR(msg_ptr); - goto out_fput; - } + if (IS_ERR(msg_ptr)) + return PTR_ERR(msg_ptr); msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; @@ -1026,19 +1102,20 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); new_leaf = NULL; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { - if (f.file->f_flags & O_NONBLOCK) { + if (fd_file(f)->f_flags & O_NONBLOCK) { ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; - wait.state = STATE_NONE; + + /* memory barrier not required, we hold info->lock */ + WRITE_ONCE(wait.state, STATE_NONE); ret = wq_sleep(info, SEND, timeout, &wait); /* * wq_sleep must be called with info->lock held, and @@ -1049,7 +1126,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { - pipelined_send(info, msg_ptr, receiver); + pipelined_send(&wake_q, info, msg_ptr, receiver); } else { /* adds message to the queue */ ret = msg_insert(msg_ptr, info); @@ -1057,67 +1134,52 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, goto out_unlock; __do_notify(info); } - inode->i_atime = inode->i_mtime = inode->i_ctime = - CURRENT_TIME; + simple_inode_init_ts(inode); } out_unlock: spin_unlock(&info->lock); + wake_up_q(&wake_q); out_free: if (ret) free_msg(msg_ptr); -out_fput: - fdput(f); -out: return ret; } -SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, - size_t, msg_len, unsigned int __user *, u_msg_prio, - const struct timespec __user *, u_abs_timeout) +static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, + size_t msg_len, unsigned int __user *u_msg_prio, + struct timespec64 *ts) { ssize_t ret; struct msg_msg *msg_ptr; - struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; - struct timespec ts; struct posix_msg_tree_node *new_leaf = NULL; - if (u_abs_timeout) { - int res = prepare_timeout(u_abs_timeout, &expires, &ts); - if (res) - return res; + if (ts) { + expires = timespec64_to_ktime(*ts); timeout = &expires; } - audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? 
&ts : NULL); + audit_mq_sendrecv(mqdes, msg_len, 0, ts); - f = fdget(mqdes); - if (unlikely(!f.file)) { - ret = -EBADF; - goto out; - } + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; - } + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; info = MQUEUE_I(inode); - audit_inode(NULL, f.file->f_path.dentry, 0); + audit_file(fd_file(f)); - if (unlikely(!(f.file->f_mode & FMODE_READ))) { - ret = -EBADF; - goto out_fput; - } + if (unlikely(!(fd_file(f)->f_mode & FMODE_READ))) + return -EBADF; /* checks if buffer is big enough */ - if (unlikely(msg_len < info->attr.mq_msgsize)) { - ret = -EMSGSIZE; - goto out_fput; - } + if (unlikely(msg_len < info->attr.mq_msgsize)) + return -EMSGSIZE; /* * msg_insert really wants us to have a valid, spare node struct so @@ -1133,30 +1195,33 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == 0) { - if (f.file->f_flags & O_NONBLOCK) { + if (fd_file(f)->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; - wait.state = STATE_NONE; + + /* memory barrier not required, we hold info->lock */ + WRITE_ONCE(wait.state, STATE_NONE); ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { + DEFINE_WAKE_Q(wake_q); + msg_ptr = msg_get(info); - inode->i_atime = inode->i_mtime = inode->i_ctime = - CURRENT_TIME; + simple_inode_init_ts(inode); /* There is now free space in queue. */ - pipelined_receive(info); + pipelined_receive(&wake_q, info); spin_unlock(&info->lock); + wake_up_q(&wake_q); ret = 0; } if (ret == 0) { @@ -1168,116 +1233,122 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, } free_msg(msg_ptr); } -out_fput: - fdput(f); -out: return ret; } +SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct __kernel_timespec __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); +} + +SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct __kernel_timespec __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); +} + /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. 
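 *
 * Illustrative userspace round trip (glibc API, error handling
 * omitted; "mqd" is a hypothetical descriptor):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mqd, &sev);	-- register as notify owner
 *	mq_notify(mqd, NULL);	-- deregister; NULL from a task that
 *				   is not the owner is silently ignored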
*/ -SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, - const struct sigevent __user *, u_notification) +static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; - struct fd f; struct sock *sock; struct inode *inode; - struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; - if (u_notification) { - if (copy_from_user(¬ification, u_notification, - sizeof(struct sigevent))) - return -EFAULT; - } - - audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); + audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; - if (u_notification != NULL) { - if (unlikely(notification.sigev_notify != SIGEV_NONE && - notification.sigev_notify != SIGEV_SIGNAL && - notification.sigev_notify != SIGEV_THREAD)) + if (notification != NULL) { + if (unlikely(notification->sigev_notify != SIGEV_NONE && + notification->sigev_notify != SIGEV_SIGNAL && + notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; - if (notification.sigev_notify == SIGEV_SIGNAL && - !valid_signal(notification.sigev_signo)) { + if (notification->sigev_notify == SIGEV_SIGNAL && + !valid_signal(notification->sigev_signo)) { return -EINVAL; } - if (notification.sigev_notify == SIGEV_THREAD) { + if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); - if (!nc) { - ret = -ENOMEM; - goto out; - } + if (!nc) + return -ENOMEM; + if (copy_from_user(nc->data, - notification.sigev_value.sival_ptr, + notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { - ret = -EFAULT; - goto out; + kfree_skb(nc); + return -EFAULT; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: - f = fdget(notification.sigev_signo); - if (!f.file) { - ret = -EBADF; - goto out; - } - sock = netlink_getsockbyfilp(f.file); - fdput(f); + sock = netlink_getsockbyfd(notification->sigev_signo); if (IS_ERR(sock)) { - ret = PTR_ERR(sock); - sock = NULL; - goto out; + kfree_skb(nc); + return PTR_ERR(sock); } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; - if (ret) { - sock = NULL; - nc = NULL; - goto out; - } + if (ret) + return ret; } } - f = fdget(mqdes); - if (!f.file) { + CLASS(fd, f)(mqdes); + if (fd_empty(f)) { ret = -EBADF; goto out; } - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) { ret = -EBADF; - goto out_fput; + goto out; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); - if (u_notification == NULL) { + if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); - inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, + inode_set_ctime_current(inode)); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { - switch (notification.sigev_notify) { + switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; @@ -1289,87 +1360,237 @@ retry: info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: - info->notify.sigev_signo = notification.sigev_signo; - info->notify.sigev_value = notification.sigev_value; + info->notify.sigev_signo = notification->sigev_signo; + info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; + info->notify_self_exec_id = current->self_exec_id; break; } info->notify_owner = 
get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); - inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); } spin_unlock(&info->lock); -out_fput: - fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { - dev_kfree_skb(nc); - } return ret; } +SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct sigevent __user *, u_notification) +{ + struct sigevent n, *p = NULL; + if (u_notification) { + if (copy_from_user(&n, u_notification, sizeof(struct sigevent))) + return -EFAULT; + p = &n; + } + return do_mq_notify(mqdes, p); +} + +static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old) +{ + struct inode *inode; + struct mqueue_inode_info *info; + + if (new && (new->mq_flags & (~O_NONBLOCK))) + return -EINVAL; + + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; + + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; + + inode = file_inode(fd_file(f)); + info = MQUEUE_I(inode); + + spin_lock(&info->lock); + + if (old) { + *old = info->attr; + old->mq_flags = fd_file(f)->f_flags & O_NONBLOCK; + } + if (new) { + audit_mq_getsetattr(mqdes, new); + spin_lock(&fd_file(f)->f_lock); + if (new->mq_flags & O_NONBLOCK) + fd_file(f)->f_flags |= O_NONBLOCK; + else + fd_file(f)->f_flags &= ~O_NONBLOCK; + spin_unlock(&fd_file(f)->f_lock); + + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); + } + + spin_unlock(&info->lock); + return 0; +} + SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct mq_attr __user *, u_mqstat, struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; - struct fd f; - struct inode *inode; - struct mqueue_inode_info *info; + struct mq_attr *new = NULL, *old = NULL; - if (u_mqstat != NULL) { - if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr))) + if (u_mqstat) { + new = &mqstat; + if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr))) return -EFAULT; - if (mqstat.mq_flags & (~O_NONBLOCK)) - return -EINVAL; } + if (u_omqstat) + old = &omqstat; - f = fdget(mqdes); - if (!f.file) { - ret = -EBADF; - goto out; + ret = do_mq_getsetattr(mqdes, new, old); + if (ret || !old) + return ret; + + if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr))) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_COMPAT + +struct compat_mq_attr { + compat_long_t mq_flags; /* message queue flags */ + compat_long_t mq_maxmsg; /* maximum number of messages */ + compat_long_t mq_msgsize; /* maximum message size */ + compat_long_t mq_curmsgs; /* number of messages currently queued */ + compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ +}; + +static inline int get_compat_mq_attr(struct mq_attr *attr, + const struct compat_mq_attr __user *uattr) +{ + struct compat_mq_attr v; + + if (copy_from_user(&v, uattr, sizeof(*uattr))) + return -EFAULT; + + memset(attr, 0, sizeof(*attr)); + attr->mq_flags = v.mq_flags; + attr->mq_maxmsg = v.mq_maxmsg; + attr->mq_msgsize = v.mq_msgsize; + attr->mq_curmsgs = v.mq_curmsgs; + return 0; +} + +static inline int put_compat_mq_attr(const struct mq_attr *attr, + struct compat_mq_attr __user *uattr) +{ + struct compat_mq_attr v; + + memset(&v, 0, sizeof(v)); + v.mq_flags = attr->mq_flags; + v.mq_maxmsg = attr->mq_maxmsg; + v.mq_msgsize = attr->mq_msgsize; + v.mq_curmsgs = attr->mq_curmsgs; + if (copy_to_user(uattr, &v, sizeof(*uattr))) + return -EFAULT; + return 0; +} + +COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, + 
int, oflag, compat_mode_t, mode, + struct compat_mq_attr __user *, u_attr) +{ + struct mq_attr attr, *p = NULL; + if (u_attr && oflag & O_CREAT) { + p = &attr; + if (get_compat_mq_attr(&attr, u_attr)) + return -EFAULT; } + return do_mq_open(u_name, oflag, mode, p); +} - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; +COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct compat_sigevent __user *, u_notification) +{ + struct sigevent n, *p = NULL; + if (u_notification) { + if (get_compat_sigevent(&n, u_notification)) + return -EFAULT; + if (n.sigev_notify == SIGEV_THREAD) + n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); + p = &n; } - info = MQUEUE_I(inode); + return do_mq_notify(mqdes, p); +} - spin_lock(&info->lock); +COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct compat_mq_attr __user *, u_mqstat, + struct compat_mq_attr __user *, u_omqstat) +{ + int ret; + struct mq_attr mqstat, omqstat; + struct mq_attr *new = NULL, *old = NULL; - omqstat = info->attr; - omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; if (u_mqstat) { - audit_mq_getsetattr(mqdes, &mqstat); - spin_lock(&f.file->f_lock); - if (mqstat.mq_flags & O_NONBLOCK) - f.file->f_flags |= O_NONBLOCK; - else - f.file->f_flags &= ~O_NONBLOCK; - spin_unlock(&f.file->f_lock); - - inode->i_atime = inode->i_ctime = CURRENT_TIME; + new = &mqstat; + if (get_compat_mq_attr(new, u_mqstat)) + return -EFAULT; } + if (u_omqstat) + old = &omqstat; - spin_unlock(&info->lock); + ret = do_mq_getsetattr(mqdes, new, old); + if (ret || !old) + return ret; - ret = 0; - if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat, - sizeof(struct mq_attr))) - ret = -EFAULT; + if (put_compat_mq_attr(old, u_omqstat)) + return -EFAULT; + return 0; +} +#endif -out_fput: - fdput(f); -out: - return ret; +#ifdef CONFIG_COMPAT_32BIT_TIME +static int compat_prepare_timeout(const struct old_timespec32 __user *p, + struct timespec64 *ts) +{ + if (get_old_timespec32(ts, p)) + return -EFAULT; + if (!timespec64_valid(ts)) + return -EINVAL; + return 0; } +SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes, + const char __user *, u_msg_ptr, + unsigned int, msg_len, unsigned int, msg_prio, + const struct old_timespec32 __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = compat_prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); +} + +SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes, + char __user *, u_msg_ptr, + unsigned int, msg_len, unsigned int __user *, u_msg_prio, + const struct old_timespec32 __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = compat_prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); +} +#endif + static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, @@ -1385,20 +1606,27 @@ static const struct file_operations mqueue_file_operations = { static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, - .destroy_inode = mqueue_destroy_inode, + .free_inode = mqueue_free_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; +static const struct fs_context_operations mqueue_fs_context_ops = { + .free = mqueue_fs_context_free, + .get_tree = mqueue_get_tree, 
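+	/* no .parse_param: mqueue accepts no mount options */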
+}; + static struct file_system_type mqueue_fs_type = { - .name = "mqueue", - .mount = mqueue_mount, - .kill_sb = kill_litter_super, - .fs_flags = FS_USERNS_MOUNT, + .name = "mqueue", + .init_fs_context = mqueue_init_fs_context, + .kill_sb = kill_anon_super, + .fs_flags = FS_USERNS_MOUNT, }; int mq_init_ns(struct ipc_namespace *ns) { + struct vfsmount *m; + ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; @@ -1406,12 +1634,10 @@ int mq_init_ns(struct ipc_namespace *ns) ns->mq_msg_default = DFLT_MSG; ns->mq_msgsize_default = DFLT_MSGSIZE; - ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); - if (IS_ERR(ns->mq_mnt)) { - int err = PTR_ERR(ns->mq_mnt); - ns->mq_mnt = NULL; - return err; - } + m = mq_create_mount(ns); + if (IS_ERR(m)) + return PTR_ERR(m); + ns->mq_mnt = m; return 0; } @@ -1420,23 +1646,21 @@ void mq_clear_sbinfo(struct ipc_namespace *ns) ns->mq_mnt->mnt_sb->s_fs_info = NULL; } -void mq_put_mnt(struct ipc_namespace *ns) -{ - kern_unmount(ns->mq_mnt); -} - static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, - SLAB_HWCACHE_ALIGN, init_once); + SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; - /* ignore failures - they are not fatal */ - mq_sysctl_table = mq_register_sysctl_table(); + if (!setup_mq_sysctls(&init_ipc_ns)) { + pr_warn("sysctl registration failed\n"); + error = -ENOMEM; + goto out_kmem; + } error = register_filesystem(&mqueue_fs_type); if (error) @@ -1453,10 +1677,10 @@ static int __init init_mqueue_fs(void) out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: - if (mq_sysctl_table) - unregister_sysctl_table(mq_sysctl_table); + retire_mq_sysctls(&init_ipc_ns); +out_kmem: kmem_cache_destroy(mqueue_inode_cachep); return error; } -__initcall(init_mqueue_fs); +device_initcall(init_mqueue_fs); diff --git a/ipc/msg.c b/ipc/msg.c index bd60d7e159e8..ee6af4fe52bf 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/msg.c * Copyright (C) 1992 Krishna Balasubramanian @@ -30,21 +31,48 @@ #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> -#include <linux/sched.h> +#include <linux/sched/wake_q.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> +#include <linux/percpu_counter.h> #include <asm/current.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" +/* one msq_queue structure for each present queue on the system */ +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; /* last msgsnd time */ + time64_t q_rtime; /* last msgrcv time */ + time64_t q_ctime; /* last change time */ + unsigned long q_cbytes; /* current number of bytes on queue */ + unsigned long q_qnum; /* number of messages in queue */ + unsigned long q_qbytes; /* max number of bytes on queue */ + struct pid *q_lspid; /* pid of last msgsnd */ + struct pid *q_lrpid; /* last receive pid */ + + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; +} __randomize_layout; + /* - * one msg_receiver structure for each sleeping receiver: + * MSG_BARRIER Locking: + * + * Similar to the optimization used in ipc/mqueue.c, one syscall return path + * does not acquire any locks when it sees that a message exists in + * 
msg_receiver.r_msg. Therefore r_msg is set using smp_store_release() + * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition, + * wake_q_add_safe() is used. See ipc/mqueue.c for more details */ + +/* one msg_receiver structure for each sleeping receiver */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; @@ -53,13 +81,14 @@ struct msg_receiver { long r_msgtype; long r_maxsize; - struct msg_msg *volatile r_msg; + struct msg_msg *r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { struct list_head list; struct task_struct *tsk; + size_t msgsz; }; #define SEARCH_ANY 1 @@ -70,80 +99,9 @@ struct msg_sender { #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) -#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) - -static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); -static int newque(struct ipc_namespace *, struct ipc_params *); -#ifdef CONFIG_PROC_FS -static int sysvipc_msg_proc_show(struct seq_file *s, void *it); -#endif - -/* - * Scale msgmni with the available lowmem size: the memory dedicated to msg - * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. - * Also take into account the number of nsproxies created so far. - * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. - */ -void recompute_msgmni(struct ipc_namespace *ns) -{ - struct sysinfo i; - unsigned long allowed; - int nb_ns; - - si_meminfo(&i); - allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) - / MSGMNB; - nb_ns = atomic_read(&nr_ipc_ns); - allowed /= nb_ns; - - if (allowed < MSGMNI) { - ns->msg_ctlmni = MSGMNI; - return; - } - - if (allowed > IPCMNI / nb_ns) { - ns->msg_ctlmni = IPCMNI / nb_ns; - return; - } - - ns->msg_ctlmni = allowed; -} - -void msg_init_ns(struct ipc_namespace *ns) -{ - ns->msg_ctlmax = MSGMAX; - ns->msg_ctlmnb = MSGMNB; - - recompute_msgmni(ns); - - atomic_set(&ns->msg_bytes, 0); - atomic_set(&ns->msg_hdrs, 0); - ipc_init_ids(&ns->ids[IPC_MSG_IDS]); -} - -#ifdef CONFIG_IPC_NS -void msg_exit_ns(struct ipc_namespace *ns) -{ - free_ipcs(ns, &msg_ids(ns), freeque); - idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); -} -#endif - -void __init msg_init(void) -{ - msg_init_ns(&init_ipc_ns); - - printk(KERN_INFO "msgmni has been set to %d\n", - init_ipc_ns.msg_ctlmni); - - ipc_init_proc_interface("sysvipc/msg", - " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", - IPC_MSG_IDS, sysvipc_msg_proc_show); -} - static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -167,90 +125,140 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) ipc_rmid(&msg_ids(ns), &s->q_perm); } +static void msg_rcu_free(struct rcu_head *head) +{ + struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); + struct msg_queue *msq = container_of(p, struct msg_queue, q_perm); + + security_msg_queue_free(&msq->q_perm); + kfree(msq); +} + /** * newque - Create a new msg queue * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * - * Called with msg_ids.rw_mutex held (writer) + * Called with msg_ids.rwsem held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; - int id, retval; + int retval; key_t key = params->key; int msgflg = params->flg; - msq = 
ipc_rcu_alloc(sizeof(*msq)); - if (!msq) + msq = kmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); + if (unlikely(!msq)) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; - retval = security_msg_queue_alloc(msq); + retval = security_msg_queue_alloc(&msq->q_perm); if (retval) { - ipc_rcu_putref(msq); + kfree(msq); return retval; } - /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - security_msg_queue_free(msq); - ipc_rcu_putref(msq); - return id; - } - msq->q_stime = msq->q_rtime = 0; - msq->q_ctime = get_seconds(); + msq->q_ctime = ktime_get_real_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; - msq->q_lspid = msq->q_lrpid = 0; + msq->q_lspid = msq->q_lrpid = NULL; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); + /* ipc_addid() locks msq upon success. */ + retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (retval < 0) { + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); + return retval; + } + ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); return msq->q_perm.id; } -static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) +static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz) +{ + return msgsz + msq->q_cbytes <= msq->q_qbytes && + 1 + msq->q_qnum <= msq->q_qbytes; +} + +static inline void ss_add(struct msg_queue *msq, + struct msg_sender *mss, size_t msgsz) { mss->tsk = current; - current->state = TASK_INTERRUPTIBLE; + mss->msgsz = msgsz; + /* + * No memory barrier required: we did ipc_lock_object(), + * and the waker obtains that lock before calling wake_q_add(). + */ + __set_current_state(TASK_INTERRUPTIBLE); list_add_tail(&mss->list, &msq->q_senders); } static inline void ss_del(struct msg_sender *mss) { - if (mss->list.next != NULL) + if (mss->list.next) list_del(&mss->list); } -static void ss_wakeup(struct list_head *h, int kill) +static void ss_wakeup(struct msg_queue *msq, + struct wake_q_head *wake_q, bool kill) { struct msg_sender *mss, *t; + struct task_struct *stop_tsk = NULL; + struct list_head *h = &msq->q_senders; list_for_each_entry_safe(mss, t, h, list) { if (kill) mss->list.next = NULL; - wake_up_process(mss->tsk); + + /* + * Stop at the first task we don't wakeup, + * we've already iterated the original + * sender queue. + */ + else if (stop_tsk == mss->tsk) + break; + /* + * We are not in an EIDRM scenario here, therefore + * verify that we really need to wakeup the task. + * To maintain current semantics and wakeup order, + * move the sender to the tail on behalf of the + * blocked task. 
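+	 * Example: senders S1 (large message) and S2 (small message)
+	 * are queued in that order and only S2's message now fits.
+	 * S1 is moved to the tail and remembered in stop_tsk, S2 is
+	 * woken, and the walk stops when it reaches S1 again instead
+	 * of spinning over senders that still cannot proceed.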
+ */ + else if (!msg_fits_inqueue(msq, mss->msgsz)) { + if (!stop_tsk) + stop_tsk = mss->tsk; + + list_move_tail(&mss->list, &msq->q_senders); + continue; + } + + wake_q_add(wake_q, mss->tsk); } } -static void expunge_all(struct msg_queue *msq, int res) +static void expunge_all(struct msg_queue *msq, int res, + struct wake_q_head *wake_q) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { - msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); - msr->r_msg = ERR_PTR(res); + struct task_struct *r_tsk; + + r_tsk = get_task_struct(msr->r_tsk); + + /* see MSG_BARRIER for purpose/pairing */ + smp_store_release(&msr->r_msg, ERR_PTR(res)); + wake_q_add_safe(wake_q, r_tsk); } } @@ -259,60 +267,60 @@ static void expunge_all(struct msg_queue *msq, int res) * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * - * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held - * before freeque() is called. msg_ids.rw_mutex remains locked on exit. + * msg_ids.rwsem (writer) and the spinlock for this message queue are held + * before freeque() is called. msg_ids.rwsem remains locked on exit. */ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) + __releases(RCU) + __releases(&msq->q_perm) { struct msg_msg *msg, *t; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); + DEFINE_WAKE_Q(wake_q); - expunge_all(msq, -EIDRM); - ss_wakeup(&msq->q_senders, 1); + expunge_all(msq, -EIDRM, &wake_q); + ss_wakeup(msq, &wake_q, true); msg_rmid(ns, msq); - msg_unlock(msq); + ipc_unlock_object(&msq->q_perm); + wake_up_q(&wake_q); + rcu_read_unlock(); list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { - atomic_dec(&ns->msg_hdrs); + percpu_counter_sub_local(&ns->percpu_msg_hdrs, 1); free_msg(msg); } - atomic_sub(msq->q_cbytes, &ns->msg_bytes); - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + percpu_counter_sub_local(&ns->percpu_msg_bytes, msq->q_cbytes); + ipc_update_pid(&msq->q_lspid, NULL); + ipc_update_pid(&msq->q_lrpid, NULL); + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); } -/* - * Called with msg_ids.rw_mutex and ipcp locked. 
- */ -static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) -{ - struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); - - return security_msg_queue_associate(msq, msgflg); -} - -SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) +long ksys_msgget(key_t key, int msgflg) { struct ipc_namespace *ns; - struct ipc_ops msg_ops; + static const struct ipc_ops msg_ops = { + .getnew = newque, + .associate = security_msg_queue_associate, + }; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; - msg_ops.getnew = newque; - msg_ops.associate = msg_security; - msg_ops.more_checks = NULL; - msg_params.key = key; msg_params.flg = msgflg; return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } +SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) +{ + return ksys_msgget(key, msgflg); +} + static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -357,7 +365,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -369,9 +377,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->msg_perm.uid = tbuf_old.msg_perm.uid; - out->msg_perm.gid = tbuf_old.msg_perm.gid; - out->msg_perm.mode = tbuf_old.msg_perm.mode; + out->msg_perm.uid = tbuf_old.msg_perm.uid; + out->msg_perm.gid = tbuf_old.msg_perm.gid; + out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; @@ -386,28 +394,22 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) } /* - * This function handles some msgctl commands which require the rw_mutex + * This function handles some msgctl commands which require the rwsem * to be held in write mode. - * NOTE: no locks must be held, the rw_mutex is taken inside this function. + * NOTE: no locks must be held, the rwsem is taken inside this function. 
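+ *
+ * Locking order inside (a summary of the code below, not a new
+ * rule): down_write(&msg_ids(ns).rwsem), then rcu_read_lock(),
+ * then ipc_lock_object() only on the paths that modify the queue.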
*/ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, - struct msqid_ds __user *buf, int version) + struct ipc64_perm *perm, int msg_qbytes) { struct kern_ipc_perm *ipcp; - struct msqid64_ds uninitialized_var(msqid64); struct msg_queue *msq; int err; - if (cmd == IPC_SET) { - if (copy_msqid_from_user(&msqid64, buf, version)) - return -EFAULT; - } - - down_write(&msg_ids(ns).rw_mutex); + down_write(&msg_ids(ns).rwsem); rcu_read_lock(); - ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd, - &msqid64.msg_perm, msqid64.msg_qbytes); + ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd, + perm, msg_qbytes); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; @@ -415,7 +417,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, msq = container_of(ipcp, struct msg_queue, q_perm); - err = security_msg_queue_msgctl(msq, cmd); + err = security_msg_queue_msgctl(&msq->q_perm, cmd); if (err) goto out_unlock1; @@ -426,29 +428,38 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, freeque(ns, ipcp); goto out_up; case IPC_SET: - if (msqid64.msg_qbytes > ns->msg_ctlmnb && + { + DEFINE_WAKE_Q(wake_q); + + if (msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock1; } ipc_lock_object(&msq->q_perm); - err = ipc_update_perm(&msqid64.msg_perm, ipcp); + err = ipc_update_perm(perm, ipcp); if (err) goto out_unlock0; - msq->q_qbytes = msqid64.msg_qbytes; + msq->q_qbytes = msg_qbytes; - msq->q_ctime = get_seconds(); - /* sleeping receivers might be excluded by + msq->q_ctime = ktime_get_real_seconds(); + /* + * Sleeping receivers might be excluded by * stricter permissions. */ - expunge_all(msq, -EAGAIN); - /* sleeping senders might be able to send + expunge_all(msq, -EAGAIN, &wake_q); + /* + * Sleeping senders might be able to send * due to a larger queue size. */ - ss_wakeup(&msq->q_senders, 0); - break; + ss_wakeup(msq, &wake_q, false); + ipc_unlock_object(&msq->q_perm); + wake_up_q(&wake_q); + + goto out_unlock1; + } default: err = -EINVAL; goto out_unlock1; @@ -459,205 +470,389 @@ out_unlock0: out_unlock1: rcu_read_unlock(); out_up: - up_write(&msg_ids(ns).rw_mutex); + up_write(&msg_ids(ns).rwsem); return err; } -static int msgctl_nolock(struct ipc_namespace *ns, int msqid, - int cmd, int version, void __user *buf) +static int msgctl_info(struct ipc_namespace *ns, int msqid, + int cmd, struct msginfo *msginfo) { int err; - struct msg_queue *msq; - - switch (cmd) { - case IPC_INFO: - case MSG_INFO: - { - struct msginfo msginfo; - int max_id; + int max_idx; - if (!buf) - return -EFAULT; - - /* - * We must not return kernel stack data. - * due to padding, it's not enough - * to set all member fields. - */ - err = security_msg_queue_msgctl(NULL, cmd); - if (err) - return err; - - memset(&msginfo, 0, sizeof(msginfo)); - msginfo.msgmni = ns->msg_ctlmni; - msginfo.msgmax = ns->msg_ctlmax; - msginfo.msgmnb = ns->msg_ctlmnb; - msginfo.msgssz = MSGSSZ; - msginfo.msgseg = MSGSEG; - down_read(&msg_ids(ns).rw_mutex); - if (cmd == MSG_INFO) { - msginfo.msgpool = msg_ids(ns).in_use; - msginfo.msgmap = atomic_read(&ns->msg_hdrs); - msginfo.msgtql = atomic_read(&ns->msg_bytes); - } else { - msginfo.msgmap = MSGMAP; - msginfo.msgpool = MSGPOOL; - msginfo.msgtql = MSGTQL; - } - max_id = ipc_get_maxid(&msg_ids(ns)); - up_read(&msg_ids(ns).rw_mutex); - if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) - return -EFAULT; - return (max_id < 0) ? 0 : max_id; + /* + * We must not return kernel stack data. 
+ * due to padding, it's not enough + * to set all member fields. + */ + err = security_msg_queue_msgctl(NULL, cmd); + if (err) + return err; + + memset(msginfo, 0, sizeof(*msginfo)); + msginfo->msgmni = ns->msg_ctlmni; + msginfo->msgmax = ns->msg_ctlmax; + msginfo->msgmnb = ns->msg_ctlmnb; + msginfo->msgssz = MSGSSZ; + msginfo->msgseg = MSGSEG; + down_read(&msg_ids(ns).rwsem); + if (cmd == MSG_INFO) + msginfo->msgpool = msg_ids(ns).in_use; + max_idx = ipc_get_maxidx(&msg_ids(ns)); + up_read(&msg_ids(ns).rwsem); + if (cmd == MSG_INFO) { + msginfo->msgmap = min_t(int, + percpu_counter_sum(&ns->percpu_msg_hdrs), + INT_MAX); + msginfo->msgtql = min_t(int, + percpu_counter_sum(&ns->percpu_msg_bytes), + INT_MAX); + } else { + msginfo->msgmap = MSGMAP; + msginfo->msgpool = MSGPOOL; + msginfo->msgtql = MSGTQL; } + return (max_idx < 0) ? 0 : max_idx; +} - case MSG_STAT: - case IPC_STAT: - { - struct msqid64_ds tbuf; - int success_return; - - if (!buf) - return -EFAULT; +static int msgctl_stat(struct ipc_namespace *ns, int msqid, + int cmd, struct msqid64_ds *p) +{ + struct msg_queue *msq; + int err; - memset(&tbuf, 0, sizeof(tbuf)); + memset(p, 0, sizeof(*p)); - rcu_read_lock(); - if (cmd == MSG_STAT) { - msq = msq_obtain_object(ns, msqid); - if (IS_ERR(msq)) { - err = PTR_ERR(msq); - goto out_unlock; - } - success_return = msq->q_perm.id; - } else { - msq = msq_obtain_object_check(ns, msqid); - if (IS_ERR(msq)) { - err = PTR_ERR(msq); - goto out_unlock; - } - success_return = 0; + rcu_read_lock(); + if (cmd == MSG_STAT || cmd == MSG_STAT_ANY) { + msq = msq_obtain_object(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock; + } + } else { /* IPC_STAT */ + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock; } + } + /* see comment for SHM_STAT_ANY */ + if (cmd == MSG_STAT_ANY) + audit_ipc_obj(&msq->q_perm); + else { err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; + } - err = security_msg_queue_msgctl(msq, cmd); - if (err) - goto out_unlock; + err = security_msg_queue_msgctl(&msq->q_perm, cmd); + if (err) + goto out_unlock; - kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); - tbuf.msg_stime = msq->q_stime; - tbuf.msg_rtime = msq->q_rtime; - tbuf.msg_ctime = msq->q_ctime; - tbuf.msg_cbytes = msq->q_cbytes; - tbuf.msg_qnum = msq->q_qnum; - tbuf.msg_qbytes = msq->q_qbytes; - tbuf.msg_lspid = msq->q_lspid; - tbuf.msg_lrpid = msq->q_lrpid; - rcu_read_unlock(); + ipc_lock_object(&msq->q_perm); - if (copy_msqid_to_user(buf, &tbuf, version)) - return -EFAULT; - return success_return; + if (!ipc_valid_object(&msq->q_perm)) { + ipc_unlock_object(&msq->q_perm); + err = -EIDRM; + goto out_unlock; } - default: - return -EINVAL; + kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm); + p->msg_stime = msq->q_stime; + p->msg_rtime = msq->q_rtime; + p->msg_ctime = msq->q_ctime; +#ifndef CONFIG_64BIT + p->msg_stime_high = msq->q_stime >> 32; + p->msg_rtime_high = msq->q_rtime >> 32; + p->msg_ctime_high = msq->q_ctime >> 32; +#endif + p->msg_cbytes = msq->q_cbytes; + p->msg_qnum = msq->q_qnum; + p->msg_qbytes = msq->q_qbytes; + p->msg_lspid = pid_vnr(msq->q_lspid); + p->msg_lrpid = pid_vnr(msq->q_lrpid); + + if (cmd == IPC_STAT) { + /* + * As defined in SUS: + * Return 0 on success + */ + err = 0; + } else { + /* + * MSG_STAT and MSG_STAT_ANY (both Linux specific) + * Return the full id, including the sequence number + */ + err = msq->q_perm.id; } - return err; + ipc_unlock_object(&msq->q_perm); out_unlock: 
rcu_read_unlock(); return err; } -SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int version) { - int version; struct ipc_namespace *ns; + struct msqid64_ds msqid64; + int err; if (msqid < 0 || cmd < 0) return -EINVAL; - version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: - case MSG_INFO: + case MSG_INFO: { + struct msginfo msginfo; + err = msgctl_info(ns, msqid, cmd, &msginfo); + if (err < 0) + return err; + if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) + err = -EFAULT; + return err; + } case MSG_STAT: /* msqid is an index rather than a msg queue id */ + case MSG_STAT_ANY: case IPC_STAT: - return msgctl_nolock(ns, msqid, cmd, version, buf); + err = msgctl_stat(ns, msqid, cmd, &msqid64); + if (err < 0) + return err; + if (copy_msqid_to_user(buf, &msqid64, version)) + err = -EFAULT; + return err; case IPC_SET: + if (copy_msqid_from_user(&msqid64, buf, version)) + return -EFAULT; + return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, + msqid64.msg_qbytes); case IPC_RMID: - return msgctl_down(ns, msqid, cmd, buf, version); + return msgctl_down(ns, msqid, cmd, NULL, 0); default: return -EINVAL; } } +SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +{ + return ksys_msgctl(msqid, cmd, buf, IPC_64); +} + +#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) +{ + int version = ipc_parse_version(&cmd); + + return ksys_msgctl(msqid, cmd, buf, version); +} + +SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +{ + return ksys_old_msgctl(msqid, cmd, buf); +} +#endif + +#ifdef CONFIG_COMPAT + +struct compat_msqid_ds { + struct compat_ipc_perm msg_perm; + compat_uptr_t msg_first; + compat_uptr_t msg_last; + old_time32_t msg_stime; + old_time32_t msg_rtime; + old_time32_t msg_ctime; + compat_ulong_t msg_lcbytes; + compat_ulong_t msg_lqbytes; + unsigned short msg_cbytes; + unsigned short msg_qnum; + unsigned short msg_qbytes; + compat_ipc_pid_t msg_lspid; + compat_ipc_pid_t msg_lrpid; +}; + +static int copy_compat_msqid_from_user(struct msqid64_ds *out, void __user *buf, + int version) +{ + memset(out, 0, sizeof(*out)); + if (version == IPC_64) { + struct compat_msqid64_ds __user *p = buf; + if (get_compat_ipc64_perm(&out->msg_perm, &p->msg_perm)) + return -EFAULT; + if (get_user(out->msg_qbytes, &p->msg_qbytes)) + return -EFAULT; + } else { + struct compat_msqid_ds __user *p = buf; + if (get_compat_ipc_perm(&out->msg_perm, &p->msg_perm)) + return -EFAULT; + if (get_user(out->msg_qbytes, &p->msg_qbytes)) + return -EFAULT; + } + return 0; +} + +static int copy_compat_msqid_to_user(void __user *buf, struct msqid64_ds *in, + int version) +{ + if (version == IPC_64) { + struct compat_msqid64_ds v; + memset(&v, 0, sizeof(v)); + to_compat_ipc64_perm(&v.msg_perm, &in->msg_perm); + v.msg_stime = lower_32_bits(in->msg_stime); + v.msg_stime_high = upper_32_bits(in->msg_stime); + v.msg_rtime = lower_32_bits(in->msg_rtime); + v.msg_rtime_high = upper_32_bits(in->msg_rtime); + v.msg_ctime = lower_32_bits(in->msg_ctime); + v.msg_ctime_high = upper_32_bits(in->msg_ctime); + v.msg_cbytes = in->msg_cbytes; + v.msg_qnum = in->msg_qnum; + v.msg_qbytes = in->msg_qbytes; + v.msg_lspid = in->msg_lspid; + v.msg_lrpid = in->msg_lrpid; + return copy_to_user(buf, &v, sizeof(v)); + } else { + struct compat_msqid_ds v; + memset(&v, 0, sizeof(v)); + 
to_compat_ipc_perm(&v.msg_perm, &in->msg_perm); + v.msg_stime = in->msg_stime; + v.msg_rtime = in->msg_rtime; + v.msg_ctime = in->msg_ctime; + v.msg_cbytes = in->msg_cbytes; + v.msg_qnum = in->msg_qnum; + v.msg_qbytes = in->msg_qbytes; + v.msg_lspid = in->msg_lspid; + v.msg_lrpid = in->msg_lrpid; + return copy_to_user(buf, &v, sizeof(v)); + } +} + +static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int version) +{ + struct ipc_namespace *ns; + int err; + struct msqid64_ds msqid64; + + ns = current->nsproxy->ipc_ns; + + if (msqid < 0 || cmd < 0) + return -EINVAL; + + switch (cmd & (~IPC_64)) { + case IPC_INFO: + case MSG_INFO: { + struct msginfo msginfo; + err = msgctl_info(ns, msqid, cmd, &msginfo); + if (err < 0) + return err; + if (copy_to_user(uptr, &msginfo, sizeof(struct msginfo))) + err = -EFAULT; + return err; + } + case IPC_STAT: + case MSG_STAT: + case MSG_STAT_ANY: + err = msgctl_stat(ns, msqid, cmd, &msqid64); + if (err < 0) + return err; + if (copy_compat_msqid_to_user(uptr, &msqid64, version)) + err = -EFAULT; + return err; + case IPC_SET: + if (copy_compat_msqid_from_user(&msqid64, uptr, version)) + return -EFAULT; + return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); + case IPC_RMID: + return msgctl_down(ns, msqid, cmd, NULL, 0); + default: + return -EINVAL; + } +} + +COMPAT_SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, void __user *, uptr) +{ + return compat_ksys_msgctl(msqid, cmd, uptr, IPC_64); +} + +#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION +long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr) +{ + int version = compat_ipc_parse_version(&cmd); + + return compat_ksys_msgctl(msqid, cmd, uptr, version); +} + +COMPAT_SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, void __user *, uptr) +{ + return compat_ksys_old_msgctl(msqid, cmd, uptr); +} +#endif +#endif + static int testmsg(struct msg_msg *msg, long type, int mode) { - switch(mode) - { - case SEARCH_ANY: - case SEARCH_NUMBER: + switch (mode) { + case SEARCH_ANY: + case SEARCH_NUMBER: + return 1; + case SEARCH_LESSEQUAL: + if (msg->m_type <= type) return 1; - case SEARCH_LESSEQUAL: - if (msg->m_type <=type) - return 1; - break; - case SEARCH_EQUAL: - if (msg->m_type == type) - return 1; - break; - case SEARCH_NOTEQUAL: - if (msg->m_type != type) - return 1; - break; + break; + case SEARCH_EQUAL: + if (msg->m_type == type) + return 1; + break; + case SEARCH_NOTEQUAL: + if (msg->m_type != type) + return 1; + break; } return 0; } -static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) +static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, + struct wake_q_head *wake_q) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { if (testmsg(msg, msr->r_msgtype, msr->r_mode) && - !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + !security_msg_queue_msgrcv(&msq->q_perm, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { - msr->r_msg = NULL; - wake_up_process(msr->r_tsk); - smp_mb(); - msr->r_msg = ERR_PTR(-E2BIG); + wake_q_add(wake_q, msr->r_tsk); + + /* See expunge_all regarding memory barrier */ + smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG)); } else { - msr->r_msg = NULL; - msq->q_lrpid = task_pid_vnr(msr->r_tsk); - msq->q_rtime = get_seconds(); - wake_up_process(msr->r_tsk); - smp_mb(); - msr->r_msg = msg; + ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk)); + msq->q_rtime = ktime_get_real_seconds(); + + wake_q_add(wake_q, 
msr->r_tsk); + /* See expunge_all regarding memory barrier */ + smp_store_release(&msr->r_msg, msg); return 1; } } } + return 0; } -long do_msgsnd(int msqid, long mtype, void __user *mtext, +static long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; + DEFINE_WAKE_Q(wake_q); ns = current->nsproxy->ipc_ns; @@ -680,32 +875,38 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, goto out_unlock1; } + ipc_lock_object(&msq->q_perm); + for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) - goto out_unlock1; + goto out_unlock0; - err = security_msg_queue_msgsnd(msq, msg, msgflg); + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { + err = -EIDRM; + goto out_unlock0; + } + + err = security_msg_queue_msgsnd(&msq->q_perm, msg, msgflg); if (err) - goto out_unlock1; + goto out_unlock0; - if (msgsz + msq->q_cbytes <= msq->q_qbytes && - 1 + msq->q_qnum <= msq->q_qbytes) { + if (msg_fits_inqueue(msq, msgsz)) break; - } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; - goto out_unlock1; + goto out_unlock0; } - ipc_lock_object(&msq->q_perm); - ss_add(msq, &s); + /* enqueue the sender and prepare to block */ + ss_add(msq, &s, msgsz); - if (!ipc_rcu_getref(msq)) { + if (!ipc_rcu_getref(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } @@ -717,12 +918,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, rcu_read_lock(); ipc_lock_object(&msq->q_perm); - ipc_rcu_putref(msq); - if (msq->q_perm.deleted) { + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } - ss_del(&s); if (signal_pending(current)) { @@ -730,20 +931,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, goto out_unlock0; } - ipc_unlock_object(&msq->q_perm); } - ipc_lock_object(&msq->q_perm); - msq->q_lspid = task_tgid_vnr(current); - msq->q_stime = get_seconds(); + ipc_update_pid(&msq->q_lspid, task_tgid(current)); + msq->q_stime = ktime_get_real_seconds(); - if (!pipelined_send(msq, msg)) { + if (!pipelined_send(msq, msg, &wake_q)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; - atomic_add(msgsz, &ns->msg_bytes); - atomic_inc(&ns->msg_hdrs); + percpu_counter_add_local(&ns->percpu_msg_bytes, msgsz); + percpu_counter_add_local(&ns->percpu_msg_hdrs, 1); } err = 0; @@ -751,6 +950,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, out_unlock0: ipc_unlock_object(&msq->q_perm); + wake_up_q(&wake_q); out_unlock1: rcu_read_unlock(); if (msg != NULL) @@ -758,8 +958,8 @@ out_unlock1: return err; } -SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, - int, msgflg) +long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, + int msgflg) { long mtype; @@ -768,6 +968,37 @@ SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); } +SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + int, msgflg) +{ + return ksys_msgsnd(msqid, msgp, msgsz, msgflg); +} + +#ifdef CONFIG_COMPAT + +struct compat_msgbuf { + compat_long_t mtype; + char mtext[]; +}; + +long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg) +{ + struct compat_msgbuf __user *up = compat_ptr(msgp); + compat_long_t 
mtype; + + if (get_user(mtype, &up->mtype)) + return -EFAULT; + return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg); +} + +COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp, + compat_ssize_t, msgsz, int, msgflg) +{ + return compat_ksys_msgsnd(msqid, msgp, msgsz, msgflg); +} +#endif + static inline int convert_mode(long *msgtyp, int msgflg) { if (msgflg & MSG_COPY) @@ -781,7 +1012,10 @@ static inline int convert_mode(long *msgtyp, int msgflg) if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { - *msgtyp = -*msgtyp; + if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */ + *msgtyp = LONG_MAX; + else + *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) @@ -839,15 +1073,16 @@ static inline void free_copy(struct msg_msg *copy) static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) { - struct msg_msg *msg; + struct msg_msg *msg, *found = NULL; long count = 0; list_for_each_entry(msg, &msq->q_messages, m_list) { if (testmsg(msg, *msgtyp, mode) && - !security_msg_queue_msgrcv(msq, msg, current, + !security_msg_queue_msgrcv(&msq->q_perm, msg, current, *msgtyp, mode)) { if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { *msgtyp = msg->m_type - 1; + found = msg; } else if (mode == SEARCH_NUMBER) { if (*msgtyp == count) return msg; @@ -857,16 +1092,17 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) } } - return ERR_PTR(-EAGAIN); + return found ?: ERR_PTR(-EAGAIN); } -long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, +static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { int mode; struct msg_queue *msq; struct ipc_namespace *ns; struct msg_msg *msg, *copy = NULL; + DEFINE_WAKE_Q(wake_q); ns = current->nsproxy->ipc_ns; @@ -874,6 +1110,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl return -EINVAL; if (msgflg & MSG_COPY) { + if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) + return -EINVAL; copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); if (IS_ERR(copy)) return PTR_ERR(copy); @@ -896,6 +1134,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl goto out_unlock1; ipc_lock_object(&msq->q_perm); + + /* raced with RMID? 
*/
+	if (!ipc_valid_object(&msq->q_perm)) {
+		msg = ERR_PTR(-EIDRM);
+		goto out_unlock0;
+	}
+
 	msg = find_msg(msq, &msgtyp, mode);
 	if (!IS_ERR(msg)) {
 		/*
@@ -917,12 +1162,12 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 			list_del(&msg->m_list);
 			msq->q_qnum--;
-			msq->q_rtime = get_seconds();
-			msq->q_lrpid = task_tgid_vnr(current);
+			msq->q_rtime = ktime_get_real_seconds();
+			ipc_update_pid(&msq->q_lrpid, task_tgid(current));
 			msq->q_cbytes -= msg->m_ts;
-			atomic_sub(msg->m_ts, &ns->msg_bytes);
-			atomic_dec(&ns->msg_hdrs);
-			ss_wakeup(&msq->q_senders, 0);
+			percpu_counter_sub_local(&ns->percpu_msg_bytes, msg->m_ts);
+			percpu_counter_sub_local(&ns->percpu_msg_hdrs, 1);
+			ss_wakeup(msq, &wake_q, false);
 			goto out_unlock0;
 		}
@@ -941,52 +1186,53 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 			msr_d.r_maxsize = INT_MAX;
 		else
 			msr_d.r_maxsize = bufsz;
-		msr_d.r_msg = ERR_PTR(-EAGAIN);
-		current->state = TASK_INTERRUPTIBLE;
+
+		/* memory barrier not required due to ipc_lock_object() */
+		WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN));
+
+		/* memory barrier not required, we own ipc_lock_object() */
+		__set_current_state(TASK_INTERRUPTIBLE);
 		ipc_unlock_object(&msq->q_perm);
 		rcu_read_unlock();
 		schedule();
-		/* Lockless receive, part 1:
-		 * Disable preemption. We don't hold a reference to the queue
-		 * and getting a reference would defeat the idea of a lockless
-		 * operation, thus the code relies on rcu to guarantee the
-		 * existence of msq:
+		/*
+		 * Lockless receive, part 1:
+		 * We don't hold a reference to the queue and getting a
+		 * reference would defeat the idea of a lockless operation,
+		 * thus the code relies on rcu to guarantee the existence of
+		 * msq:
 		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
 		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
-		 * rcu_read_lock() prevents preemption between reading r_msg
-		 * and acquiring the q_perm.lock in ipc_lock_object().
 		 */
 		rcu_read_lock();
-		/* Lockless receive, part 2:
-		 * Wait until pipelined_send or expunge_all are outside of
-		 * wake_up_process(). There is a race with exit(), see
-		 * ipc/mqueue.c for the details.
+		/*
+		 * Lockless receive, part 2:
+		 * The work in pipelined_send() and expunge_all():
+		 * - Set pointer to message
+		 * - Queue the receiver task for later wakeup
+		 * - Wake up the process after the lock is dropped.
+		 *
+		 * Should the process wake up before this wakeup (due to a
+		 * signal) it will either see the message and continue ...
		 */
-		msg = (struct msg_msg*)msr_d.r_msg;
-		while (msg == NULL) {
-			cpu_relax();
-			msg = (struct msg_msg *)msr_d.r_msg;
-		}
+		msg = READ_ONCE(msr_d.r_msg);
+		if (msg != ERR_PTR(-EAGAIN)) {
+			/* see MSG_BARRIER for purpose/pairing */
+			smp_acquire__after_ctrl_dep();
-		/* Lockless receive, part 3:
-		 * If there is a message or an error then accept it without
-		 * locking.
-		 */
-		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock1;
+		}
-		/* Lockless receive, part 3:
-		 * Acquire the queue spinlock.
-		 */
+		/*
+		 * ... or see -EAGAIN, acquire the lock to check the message
+		 * again.
+		 */
 		ipc_lock_object(&msq->q_perm);
-		/* Lockless receive, part 4:
-		 * Repeat test after acquiring the spinlock.
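/*
 * Hedged userspace sketch (not part of this diff) of the MSG_COPY
 * validation added above: copy mode must carry IPC_NOWAIT and must not be
 * mixed with MSG_EXCEPT, or msgrcv() fails with EINVAL. The flag is only
 * functional on CONFIG_CHECKPOINT_RESTORE kernels (ENOSYS otherwise) and
 * may be missing from libc headers, hence the fallback define.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#ifndef MSG_COPY
#define MSG_COPY 040000			/* value from <linux/msg.h> */
#endif

struct mbuf { long mtype; char mtext[64]; };

int main(void)
{
	struct mbuf b = { .mtype = 1, .mtext = "peek me" };
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (id < 0 || msgsnd(id, &b, sizeof(b.mtext), 0) < 0)
		return 1;
	/* in copy mode, msgtyp is an index: 0 = first queued message */
	if (msgrcv(id, &b, sizeof(b.mtext), 0, MSG_COPY | IPC_NOWAIT) < 0)
		perror("msgrcv(MSG_COPY)");
	else
		printf("copied without dequeueing: %s\n", b.mtext);
	return msgctl(id, IPC_RMID, NULL) < 0;
}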
- */ - msg = (struct msg_msg*)msr_d.r_msg; + msg = READ_ONCE(msr_d.r_msg); if (msg != ERR_PTR(-EAGAIN)) goto out_unlock0; @@ -1001,6 +1247,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl out_unlock0: ipc_unlock_object(&msq->q_perm); + wake_up_q(&wake_q); out_unlock1: rcu_read_unlock(); if (IS_ERR(msg)) { @@ -1014,33 +1261,116 @@ out_unlock1: return bufsz; } +long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg) +{ + return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); +} + SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { - return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); + return ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg); +} + +#ifdef CONFIG_COMPAT +static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) +{ + struct compat_msgbuf __user *msgp = dest; + size_t msgsz; + + if (put_user(msg->m_type, &msgp->mtype)) + return -EFAULT; + + msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; +} + +long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, + compat_long_t msgtyp, int msgflg) +{ + return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp, + msgflg, compat_do_msg_fill); +} + +COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, + compat_ssize_t, msgsz, compat_long_t, msgtyp, + int, msgflg) +{ + return compat_ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg); +} +#endif + +int msg_init_ns(struct ipc_namespace *ns) +{ + int ret; + + ns->msg_ctlmax = MSGMAX; + ns->msg_ctlmnb = MSGMNB; + ns->msg_ctlmni = MSGMNI; + + ret = percpu_counter_init(&ns->percpu_msg_bytes, 0, GFP_KERNEL); + if (ret) + goto fail_msg_bytes; + ret = percpu_counter_init(&ns->percpu_msg_hdrs, 0, GFP_KERNEL); + if (ret) + goto fail_msg_hdrs; + ipc_init_ids(&ns->ids[IPC_MSG_IDS]); + return 0; + +fail_msg_hdrs: + percpu_counter_destroy(&ns->percpu_msg_bytes); +fail_msg_bytes: + return ret; } +#ifdef CONFIG_IPC_NS +void msg_exit_ns(struct ipc_namespace *ns) +{ + free_ipcs(ns, &msg_ids(ns), freeque); + idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); + rhashtable_destroy(&ns->ids[IPC_MSG_IDS].key_ht); + percpu_counter_destroy(&ns->percpu_msg_bytes); + percpu_counter_destroy(&ns->percpu_msg_hdrs); +} +#endif + #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { + struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); struct user_namespace *user_ns = seq_user_ns(s); - struct msg_queue *msq = it; - - return seq_printf(s, - "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", - msq->q_perm.key, - msq->q_perm.id, - msq->q_perm.mode, - msq->q_cbytes, - msq->q_qnum, - msq->q_lspid, - msq->q_lrpid, - from_kuid_munged(user_ns, msq->q_perm.uid), - from_kgid_munged(user_ns, msq->q_perm.gid), - from_kuid_munged(user_ns, msq->q_perm.cuid), - from_kgid_munged(user_ns, msq->q_perm.cgid), - msq->q_stime, - msq->q_rtime, - msq->q_ctime); + struct kern_ipc_perm *ipcp = it; + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); + + seq_printf(s, + "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10llu %10llu %10llu\n", + msq->q_perm.key, + msq->q_perm.id, + msq->q_perm.mode, + msq->q_cbytes, + msq->q_qnum, + pid_nr_ns(msq->q_lspid, pid_ns), + pid_nr_ns(msq->q_lrpid, pid_ns), + from_kuid_munged(user_ns, msq->q_perm.uid), + from_kgid_munged(user_ns, 
msq->q_perm.gid), + from_kuid_munged(user_ns, msq->q_perm.cuid), + from_kgid_munged(user_ns, msq->q_perm.cgid), + msq->q_stime, + msq->q_rtime, + msq->q_ctime); + + return 0; } #endif + +void __init msg_init(void) +{ + msg_init_ns(&init_ipc_ns); + + ipc_init_proc_interface("sysvipc/msg", + " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", + IPC_MSG_IDS, sysvipc_msg_proc_show); +} diff --git a/ipc/msgutil.c b/ipc/msgutil.c index 491e71f2a1b8..e28f0cecb2ec 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/ipc/msgutil.c * Copyright (C) 1999, 2004 Manfred Spraul - * - * This file is released under GNU General Public Licence version 2 or - * (at your option) any later version. - * - * See the file COPYING for more details. */ #include <linux/spinlock.h> @@ -18,6 +14,8 @@ #include <linux/utsname.h> #include <linux/proc_ns.h> #include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/nstree.h> #include "util.h" @@ -29,30 +27,38 @@ DEFINE_SPINLOCK(mq_lock); * and not CONFIG_IPC_NS. */ struct ipc_namespace init_ipc_ns = { - .count = ATOMIC_INIT(1), + .ns = NS_COMMON_INIT(init_ipc_ns), .user_ns = &init_user_ns, - .proc_inum = PROC_IPC_INIT_INO, }; -atomic_t nr_ipc_ns = ATOMIC_INIT(1); - struct msg_msgseg { struct msg_msgseg *next; /* the next part of the message follows immediately */ }; -#define DATALEN_MSG (int)(PAGE_SIZE-sizeof(struct msg_msg)) -#define DATALEN_SEG (int)(PAGE_SIZE-sizeof(struct msg_msgseg)) +#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg)) +#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg)) +static kmem_buckets *msg_buckets __ro_after_init; -static struct msg_msg *alloc_msg(int len) +static int __init init_msg_buckets(void) +{ + msg_buckets = kmem_buckets_create("msg_msg", SLAB_ACCOUNT, + sizeof(struct msg_msg), + DATALEN_MSG, NULL); + + return 0; +} +subsys_initcall(init_msg_buckets); + +static struct msg_msg *alloc_msg(size_t len) { struct msg_msg *msg; struct msg_msgseg **pseg; - int alen; + size_t alen; alen = min(len, DATALEN_MSG); - msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); + msg = kmem_buckets_alloc(msg_buckets, sizeof(*msg) + alen, GFP_KERNEL); if (msg == NULL) return NULL; @@ -63,8 +69,11 @@ static struct msg_msg *alloc_msg(int len) pseg = &msg->next; while (len > 0) { struct msg_msgseg *seg; + + cond_resched(); + alen = min(len, DATALEN_SEG); - seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT); if (seg == NULL) goto out_err; *pseg = seg; @@ -80,12 +89,12 @@ out_err: return NULL; } -struct msg_msg *load_msg(const void __user *src, int len) +struct msg_msg *load_msg(const void __user *src, size_t len) { struct msg_msg *msg; struct msg_msgseg *seg; int err = -EFAULT; - int alen; + size_t alen; msg = alloc_msg(len); if (msg == NULL) @@ -117,10 +126,9 @@ out_err: struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) { struct msg_msgseg *dst_pseg, *src_pseg; - int len = src->m_ts; - int alen; + size_t len = src->m_ts; + size_t alen; - BUG_ON(dst == NULL); if (src->m_ts > dst->m_ts) return ERR_PTR(-EINVAL); @@ -147,9 +155,9 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) return ERR_PTR(-ENOSYS); } #endif -int store_msg(void __user *dest, struct msg_msg *msg, int len) +int store_msg(void __user *dest, struct msg_msg *msg, size_t len) { - int alen; + size_t alen; struct msg_msgseg *seg; alen = min(len, DATALEN_MSG); @@ -176,6 +184,8 @@ void 
free_msg(struct msg_msg *msg) kfree(msg); while (seg != NULL) { struct msg_msgseg *tmp = seg->next; + + cond_resched(); kfree(seg); seg = tmp; } diff --git a/ipc/namespace.c b/ipc/namespace.c index 7ee61bf44933..535f16ea40e1 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/namespace.c * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc. @@ -9,56 +10,105 @@ #include <linux/rcupdate.h> #include <linux/nsproxy.h> #include <linux/slab.h> +#include <linux/cred.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/user_namespace.h> #include <linux/proc_ns.h> +#include <linux/nstree.h> +#include <linux/sched/task.h> #include "util.h" +/* + * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount. + */ +static void free_ipc(struct work_struct *unused); +static DECLARE_WORK(free_ipc_work, free_ipc); + +static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns) +{ + return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES); +} + +static void dec_ipc_namespaces(struct ucounts *ucounts) +{ + dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES); +} + static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, struct ipc_namespace *old_ns) { struct ipc_namespace *ns; + struct ucounts *ucounts; int err; - ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); + err = -ENOSPC; + again: + ucounts = inc_ipc_namespaces(user_ns); + if (!ucounts) { + /* + * IPC namespaces are freed asynchronously, by free_ipc_work. + * If frees were pending, flush_work will wait, and + * return true. Fail the allocation if no frees are pending. + */ + if (flush_work(&free_ipc_work)) + goto again; + goto fail; + } + + err = -ENOMEM; + ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT); if (ns == NULL) - return ERR_PTR(-ENOMEM); + goto fail_dec; - err = proc_alloc_inum(&ns->proc_inum); - if (err) { - kfree(ns); - return ERR_PTR(err); - } + err = ns_common_init(ns); + if (err) + goto fail_free; + + ns_tree_gen_id(ns); + ns->user_ns = get_user_ns(user_ns); + ns->ucounts = ucounts; - atomic_set(&ns->count, 1); err = mq_init_ns(ns); - if (err) { - proc_free_inum(ns->proc_inum); - kfree(ns); - return ERR_PTR(err); - } - atomic_inc(&nr_ipc_ns); + if (err) + goto fail_put; - sem_init_ns(ns); - msg_init_ns(ns); - shm_init_ns(ns); + err = -ENOMEM; + if (!setup_mq_sysctls(ns)) + goto fail_mq_mount; - /* - * msgmni has already been computed for the new ipc ns. - * Thus, do the ipcns creation notification before registering that - * new ipcns in the chain. 
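/*
 * Userspace sketch (assumption: Linux with CAP_SYS_ADMIN) of the path
 * above: unshare(CLONE_NEWIPC) reaches create_ipc_ns(), the new namespace
 * starts empty, and the per-user limit checked by inc_ipc_namespaces() is
 * /proc/sys/user/max_ipc_namespaces.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	printf("old ns id: %d\n", msgget(IPC_PRIVATE, IPC_CREAT | 0600));
	if (unshare(CLONE_NEWIPC) < 0) {	/* EPERM, ENOSPC, ... */
		perror("unshare(CLONE_NEWIPC)");
		return 1;
	}
	/* the queue created above is no longer reachable from here */
	printf("new ns id: %d\n", msgget(IPC_PRIVATE, IPC_CREAT | 0600));
	return 0;
}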
- */ - ipcns_notify(IPCNS_CREATED); - register_ipcns_notifier(ns); + if (!setup_ipc_sysctls(ns)) + goto fail_mq_sysctls; - ns->user_ns = get_user_ns(user_ns); + err = msg_init_ns(ns); + if (err) + goto fail_ipc; + + sem_init_ns(ns); + shm_init_ns(ns); + ns_tree_add_raw(ns); return ns; + +fail_ipc: + retire_ipc_sysctls(ns); +fail_mq_sysctls: + retire_mq_sysctls(ns); +fail_mq_mount: + mntput(ns->mq_mnt); +fail_put: + put_user_ns(ns->user_ns); + ns_common_free(ns); +fail_free: + kfree(ns); +fail_dec: + dec_ipc_namespaces(ucounts); +fail: + return ERR_PTR(err); } -struct ipc_namespace *copy_ipcs(unsigned long flags, +struct ipc_namespace *copy_ipcs(u64 flags, struct user_namespace *user_ns, struct ipc_namespace *ns) { if (!(flags & CLONE_NEWIPC)) @@ -81,7 +131,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, int next_id; int total, in_use; - down_write(&ids->rw_mutex); + down_write(&ids->rwsem); in_use = ids->in_use; @@ -89,39 +139,50 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, perm = idr_find(&ids->ipcs_idr, next_id); if (perm == NULL) continue; - ipc_lock_by_ptr(perm); + rcu_read_lock(); + ipc_lock_object(perm); free(ns, perm); total++; } - up_write(&ids->rw_mutex); + up_write(&ids->rwsem); } static void free_ipc_ns(struct ipc_namespace *ns) { /* - * Unregistering the hotplug notifier at the beginning guarantees - * that the ipc namespace won't be freed while we are inside the - * callback routine. Since the blocking_notifier_chain_XXX routines - * hold a rw lock on the notifier list, unregister_ipcns_notifier() - * won't take the rw lock before blocking_notifier_call_chain() has - * released the rd lock. + * Caller needs to wait for an RCU grace period to have passed + * after making the mount point inaccessible to new accesses. */ - unregister_ipcns_notifier(ns); + mntput(ns->mq_mnt); sem_exit_ns(ns); msg_exit_ns(ns); shm_exit_ns(ns); - atomic_dec(&nr_ipc_ns); - /* - * Do the ipcns removal notification after decrementing nr_ipc_ns in - * order to have a correct value when recomputing msgmni. - */ - ipcns_notify(IPCNS_REMOVED); + retire_mq_sysctls(ns); + retire_ipc_sysctls(ns); + + dec_ipc_namespaces(ns->ucounts); put_user_ns(ns->user_ns); - proc_free_inum(ns->proc_inum); + ns_common_free(ns); kfree(ns); } +static LLIST_HEAD(free_ipc_list); +static void free_ipc(struct work_struct *unused) +{ + struct llist_node *node = llist_del_all(&free_ipc_list); + struct ipc_namespace *n, *t; + + llist_for_each_entry_safe(n, t, node, mnt_llist) + mnt_make_shortterm(n->mq_mnt); + + /* Wait for any last users to have gone away. */ + synchronize_rcu(); + + llist_for_each_entry_safe(n, t, node, mnt_llist) + free_ipc_ns(n); +} + /* * put_ipc_ns - drop a reference to an ipc namespace. 
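/*
 * Userspace analogue (C11 atomics; an editorial sketch, not kernel code)
 * of the free_ipc_work scheme above: put_ipc_ns() pushes the namespace on
 * a lock-free list with llist_add(), and only the pusher that found the
 * list empty schedules the worker, so a single work item drains all
 * queued frees after one synchronize_rcu().
 */
#include <stdatomic.h>
#include <stddef.h>

struct lnode { struct lnode *next; };

static _Atomic(struct lnode *) free_list;

static void schedule_worker(void)
{
	/* stand-in for schedule_work(&free_ipc_work) */
}

static void queue_free(struct lnode *n)
{
	struct lnode *old = atomic_load_explicit(&free_list,
						 memory_order_relaxed);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&free_list, &old, n,
			memory_order_release, memory_order_relaxed));

	if (!old)	/* like llist_add() returning true: list was empty */
		schedule_worker();
}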
* @ns: the namespace to put @@ -140,59 +201,57 @@ static void free_ipc_ns(struct ipc_namespace *ns) */ void put_ipc_ns(struct ipc_namespace *ns) { - if (atomic_dec_and_lock(&ns->count, &mq_lock)) { + if (ns_ref_put_and_lock(ns, &mq_lock)) { mq_clear_sbinfo(ns); spin_unlock(&mq_lock); - mq_put_mnt(ns); - free_ipc_ns(ns); + + ns_tree_remove(ns); + if (llist_add(&ns->mnt_llist, &free_ipc_list)) + schedule_work(&free_ipc_work); } } -static void *ipcns_get(struct task_struct *task) +static struct ns_common *ipcns_get(struct task_struct *task) { struct ipc_namespace *ns = NULL; struct nsproxy *nsproxy; - rcu_read_lock(); - nsproxy = task_nsproxy(task); + task_lock(task); + nsproxy = task->nsproxy; if (nsproxy) ns = get_ipc_ns(nsproxy->ipc_ns); - rcu_read_unlock(); + task_unlock(task); - return ns; + return ns ? &ns->ns : NULL; } -static void ipcns_put(void *ns) +static void ipcns_put(struct ns_common *ns) { - return put_ipc_ns(ns); + return put_ipc_ns(to_ipc_ns(ns)); } -static int ipcns_install(struct nsproxy *nsproxy, void *new) +static int ipcns_install(struct nsset *nsset, struct ns_common *new) { - struct ipc_namespace *ns = new; + struct nsproxy *nsproxy = nsset->nsproxy; + struct ipc_namespace *ns = to_ipc_ns(new); if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || - !nsown_capable(CAP_SYS_ADMIN)) + !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN)) return -EPERM; - /* Ditch state from the old ipc namespace */ - exit_sem(current); put_ipc_ns(nsproxy->ipc_ns); nsproxy->ipc_ns = get_ipc_ns(ns); return 0; } -static unsigned int ipcns_inum(void *vp) +static struct user_namespace *ipcns_owner(struct ns_common *ns) { - struct ipc_namespace *ns = vp; - - return ns->proc_inum; + return to_ipc_ns(ns)->user_ns; } const struct proc_ns_operations ipcns_operations = { .name = "ipc", - .type = CLONE_NEWIPC, .get = ipcns_get, .put = ipcns_put, .install = ipcns_install, - .inum = ipcns_inum, + .owner = ipcns_owner, }; diff --git a/ipc/sem.c b/ipc/sem.c index 41088899783d..0f06e4bd4673 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/sem.c * Copyright (C) 1992 Krishna Balasubramanian @@ -11,6 +12,7 @@ * (c) 2001 Red Hat Inc * Lockless wakeup * (c) 2003 Manfred Spraul <manfred@colorfullife.com> + * (c) 2016 Davidlohr Bueso <dave@stgolabs.net> * Further wakeup optimizations, documentation * (c) 2010 Manfred Spraul <manfred@colorfullife.com> * @@ -34,7 +36,7 @@ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO. * - undo adjustments at process exit are limited to 0..SEMVMX. * - namespace are supported. - * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtine by writing + * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing * to /proc/sys/kernel/sem. * - statistics about the usage are reported in /proc/sysvipc/sem. * @@ -47,22 +49,17 @@ * Thus: Perfect SMP scaling between independent semaphore arrays. * If multiple semaphores in one array are used, then cache line * trashing on the semaphore array spinlock will limit the scaling. - * - semncnt and semzcnt are calculated on demand in count_semncnt() and - * count_semzcnt() + * - semncnt and semzcnt are calculated on demand in count_semcnt() * - the task that performs a successful semop() scans the list of all * sleeping tasks and completes any pending operations that can be fulfilled. * Semaphores are actively given to waiting tasks (necessary for FIFO). 
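/*
 * Userspace sketch (not part of this diff) of a "complex" operation as
 * discussed above: one semop() call with two sops on different semaphores
 * is applied atomically, and it is exactly this case that forces the
 * array-wide lock instead of a single per-semaphore lock.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = 0 },	/* wait for sem 0 == 0 */
		{ .sem_num = 1, .sem_op = 1 },	/* ... then bump sem 1 */
	};

	if (id < 0 || semop(id, ops, 2) < 0) {
		perror("semop");
		return 1;
	}
	return semctl(id, 0, IPC_RMID) < 0;
}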
* (see update_queue()) * - To improve the scalability, the actual wake-up calls are performed after - * dropping all locks. (see wake_up_sem_queue_prepare(), - * wake_up_sem_queue_do()) + * dropping all locks. (see wake_up_sem_queue_prepare()) * - All work is done by the waker, the woken up task does not have to do * anything - not even acquiring a lock or dropping a refcount. * - A woken up task may not even touch the semaphore array anymore, it may * have been destroyed already by a semctl(RMID). - * - The synchronizations between wake-ups due to a timeout/signal and a - * wake-up due to a completed semaphore operation is achieved by using an - * intermediate state (IN_WAKEUP). * - UNDO values are stored in an array (one per process and per * semaphore array, lazily allocated). For backwards compatibility, multiple * modes for the UNDO variables are supported (per process, per thread) @@ -73,6 +70,7 @@ * The worst-case behavior is nevertheless O(N^2) for N wakeups. */ +#include <linux/compat.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/init.h> @@ -86,32 +84,60 @@ #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> +#include <linux/sched/wake_q.h> +#include <linux/nospec.h> +#include <linux/rhashtable.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" /* One semaphore structure for each semaphore in the system. */ struct sem { int semval; /* current value */ - int sempid; /* pid of last operation */ + /* + * PID of the process that last modified the semaphore. For + * Linux, specifically these are: + * - semop + * - semctl, via SETVAL and SETALL. + * - at task exit when performing undo adjustments (see exit_sem). + */ + struct pid *sempid; spinlock_t lock; /* spinlock for fine-grained semtimedop */ struct list_head pending_alter; /* pending single-sop operations */ /* that alter the semaphore */ struct list_head pending_const; /* pending single-sop operations */ /* that do not alter the semaphore*/ - time_t sem_otime; /* candidate for sem_otime */ + time64_t sem_otime; /* candidate for sem_otime */ } ____cacheline_aligned_in_smp; +/* One sem_array data structure for each set of semaphores in the system. */ +struct sem_array { + struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ + time64_t sem_ctime; /* create/last semctl() time */ + struct list_head pending_alter; /* pending operations */ + /* that alter the array */ + struct list_head pending_const; /* pending complex operations */ + /* that do not alter semvals */ + struct list_head list_id; /* undo requests on this array */ + int sem_nsems; /* no. of semaphores in array */ + int complex_count; /* pending complex operations */ + unsigned int use_global_lock;/* >0: global lock required */ + + struct sem sems[]; +} __randomize_layout; + /* One queue for each sleeping process in the system. */ struct sem_queue { struct list_head list; /* queue of pending operations */ struct task_struct *sleeper; /* this process */ struct sem_undo *undo; /* undo structure */ - int pid; /* process id of requesting process */ + struct pid *pid; /* process id of requesting process */ int status; /* completion status of operation */ struct sembuf *sops; /* array of pending operations */ + struct sembuf *blocking; /* the operation that blocked */ int nsops; /* number of operations */ - int alter; /* does *sops alter the array? */ + bool alter; /* does *sops alter the array? 
*/ + bool dupsop; /* sops on more than one sem_num */ }; /* Each task has a list of undo requests. They are executed automatically @@ -126,7 +152,7 @@ struct sem_undo { struct list_head list_id; /* per semaphore array list: * all undos for one array */ int semid; /* semaphore set identifier */ - short *semadj; /* array of adjustments */ + short semadj[]; /* array of adjustments */ /* one per semaphore */ }; @@ -134,7 +160,7 @@ struct sem_undo { * that may be shared among all a CLONE_SYSVSEM task group. */ struct sem_undo_list { - atomic_t refcnt; + refcount_t refcnt; spinlock_t lock; struct list_head list_proc; }; @@ -142,8 +168,6 @@ struct sem_undo_list { #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) -#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) - static int newary(struct ipc_namespace *, struct ipc_params *); static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); #ifdef CONFIG_PROC_FS @@ -154,15 +178,67 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* + * Switching from the mode suitable for simple ops + * to the mode for complex ops is costly. Therefore: + * use some hysteresis + */ +#define USE_GLOBAL_LOCK_HYSTERESIS 10 + +/* * Locking: + * a) global sem_lock() for read/write * sem_undo.id_next, * sem_array.complex_count, - * sem_array.pending{_alter,_cont}, - * sem_array.sem_undo: global sem_lock() for read/write - * sem_undo.proc_next: only "current" is allowed to read/write that field. - * - * sem_array.sem_base[i].pending_{const,alter}: - * global or semaphore sem_lock() for read/write + * sem_array.pending{_alter,_const}, + * sem_array.sem_undo + * + * b) global or semaphore sem_lock() for read/write: + * sem_array.sems[i].pending_{const,alter}: + * + * c) special: + * sem_undo_list.list_proc: + * * undo_list->lock for write + * * rcu for read + * use_global_lock: + * * global sem_lock() for write + * * either local or global sem_lock() for read. + * + * Memory ordering: + * Most ordering is enforced by using spin_lock() and spin_unlock(). + * + * Exceptions: + * 1) use_global_lock: (SEM_BARRIER_1) + * Setting it from non-zero to 0 is a RELEASE, this is ensured by + * using smp_store_release(): Immediately after setting it to 0, + * a simple op can start. + * Testing if it is non-zero is an ACQUIRE, this is ensured by using + * smp_load_acquire(). + * Setting it from 0 to non-zero must be ordered with regards to + * this smp_load_acquire(), this is guaranteed because the smp_load_acquire() + * is inside a spin_lock() and after a write from 0 to non-zero a + * spin_lock()+spin_unlock() is done. + * To prevent the compiler/cpu temporarily writing 0 to use_global_lock, + * READ_ONCE()/WRITE_ONCE() is used. + * + * 2) queue.status: (SEM_BARRIER_2) + * Initialization is done while holding sem_lock(), so no further barrier is + * required. + * Setting it to a result code is a RELEASE, this is ensured by both a + * smp_store_release() (for case a) and while holding sem_lock() + * (for case b). + * The ACQUIRE when reading the result code without holding sem_lock() is + * achieved by using READ_ONCE() + smp_acquire__after_ctrl_dep(). + * (case a above). + * Reading the result code while holding sem_lock() needs no further barriers, + * the locks inside sem_lock() enforce ordering (case b above) + * + * 3) current->state: + * current->state is set to TASK_INTERRUPTIBLE while holding sem_lock(). + * The wakeup is handled using the wake_q infrastructure. 
wake_q wakeups may + * happen immediately after calling wake_q_add. As wake_q_add_safe() is called + * when holding sem_lock(), no further barriers are required. + * + * See also ipc/mqueue.c for more details on the covered races. */ #define sc_semmsl sem_ctls[0] @@ -185,10 +261,11 @@ void sem_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &sem_ids(ns), freeary); idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); + rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht); } #endif -void __init sem_init (void) +void __init sem_init(void) { sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", @@ -217,7 +294,7 @@ static void unmerge_queues(struct sem_array *sma) */ list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { struct sem *curr; - curr = &sma->sem_base[q->sops[0].sem_num]; + curr = &sma->sems[q->sops[0].sem_num]; list_add_tail(&q->list, &curr->pending_alter); } @@ -225,7 +302,7 @@ static void unmerge_queues(struct sem_array *sma) } /** - * merge_queues - Merge single semop queues into global queue + * merge_queues - merge single semop queues into global queue * @sma: semaphore array * * This function merges all per-semaphore queues into the global queue. @@ -237,122 +314,170 @@ static void merge_queues(struct sem_array *sma) { int i; for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_splice_init(&sem->pending_alter, &sma->pending_alter); } } +static void sem_rcu_free(struct rcu_head *head) +{ + struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); + struct sem_array *sma = container_of(p, struct sem_array, sem_perm); + + security_sem_free(&sma->sem_perm); + kvfree(sma); +} + +/* + * Enter the mode suitable for non-simple operations: + * Caller must own sem_perm.lock. + */ +static void complexmode_enter(struct sem_array *sma) +{ + int i; + struct sem *sem; + + if (sma->use_global_lock > 0) { + /* + * We are already in global lock mode. + * Nothing to do, just reset the + * counter until we return to simple mode. + */ + WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS); + return; + } + WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS); + + for (i = 0; i < sma->sem_nsems; i++) { + sem = &sma->sems[i]; + spin_lock(&sem->lock); + spin_unlock(&sem->lock); + } +} + +/* + * Try to leave the mode that disallows simple operations: + * Caller must own sem_perm.lock. + */ +static void complexmode_tryleave(struct sem_array *sma) +{ + if (sma->complex_count) { + /* Complex ops are sleeping. + * We must stay in complex mode + */ + return; + } + if (sma->use_global_lock == 1) { + + /* See SEM_BARRIER_1 for purpose/pairing */ + smp_store_release(&sma->use_global_lock, 0); + } else { + WRITE_ONCE(sma->use_global_lock, + sma->use_global_lock-1); + } +} + +#define SEM_GLOBAL_LOCK (-1) /* * If the request contains only one semaphore operation, and there are * no complex transactions pending, lock only the semaphore involved. * Otherwise, lock the entire semaphore array, since we either have * multiple semaphores in our own semops, or we need to look at * semaphores from other pending complex operations. - * - * Carefully guard against sma->complex_count changing between zero - * and non-zero while we are spinning for the lock. The value of - * sma->complex_count cannot change while we are holding the lock, - * so sem_unlock should be fine. - * - * The global lock path checks that all the local locks have been released, - * checking each local lock once. 
This means that the local lock paths
- * cannot start their critical sections while the global lock is held.
- */
 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 			      int nsops)
 {
-	int locknum;
- again:
-	if (nsops == 1 && !sma->complex_count) {
-		struct sem *sem = sma->sem_base + sops->sem_num;
+	struct sem *sem;
+	int idx;
-		/* Lock just the semaphore we are interested in. */
-		spin_lock(&sem->lock);
+	if (nsops != 1) {
+		/* Complex operation - acquire a full lock */
+		ipc_lock_object(&sma->sem_perm);
+		/* Prevent parallel simple ops */
+		complexmode_enter(sma);
+		return SEM_GLOBAL_LOCK;
+	}
+
+	/*
+	 * Only one semaphore affected - try to optimize locking.
+	 * Optimized locking is possible if no complex operation
+	 * is either enqueued or processed right now.
+	 *
+	 * Both facts are tracked by use_global_lock.
+	 */
+	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
+	sem = &sma->sems[idx];
+
+	/*
+	 * Initial check for use_global_lock. Just an optimization,
+	 * no locking, no memory barrier.
+	 */
+	if (!READ_ONCE(sma->use_global_lock)) {
 		/*
-		 * If sma->complex_count was set while we were spinning,
-		 * we may need to look at things we did not lock here.
+		 * It appears that no complex operation is around.
+		 * Acquire the per-semaphore lock.
 		 */
-		if (unlikely(sma->complex_count)) {
-			spin_unlock(&sem->lock);
-			goto lock_array;
+		spin_lock(&sem->lock);
+
+		/* see SEM_BARRIER_1 for purpose/pairing */
+		if (!smp_load_acquire(&sma->use_global_lock)) {
+			/* fast path successful! */
+			return sops->sem_num;
 		}
+		spin_unlock(&sem->lock);
+	}
+
+	/* slow path: acquire the full lock */
+	ipc_lock_object(&sma->sem_perm);
+	if (sma->use_global_lock == 0) {
 		/*
-		 * Another process is holding the global lock on the
-		 * sem_array; we cannot enter our critical section,
-		 * but have to wait for the global lock to be released.
+		 * The use_global_lock mode ended while we waited for
+		 * sma->sem_perm.lock. Thus we must switch to locking
+		 * with sem->lock.
+		 * Unlike in the fast path, there is no need to recheck
+		 * sma->use_global_lock after we have acquired sem->lock:
+		 * We own sma->sem_perm.lock, thus use_global_lock cannot
+		 * change.
 		 */
-		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
-			spin_unlock(&sem->lock);
-			spin_unlock_wait(&sma->sem_perm.lock);
-			goto again;
-		}
+		spin_lock(&sem->lock);
-		locknum = sops->sem_num;
+		ipc_unlock_object(&sma->sem_perm);
+		return sops->sem_num;
 	} else {
-		int i;
 		/*
-		 * Lock the semaphore array, and wait for all of the
-		 * individual semaphore locks to go away. The code
-		 * above ensures no new single-lock holders will enter
-		 * their critical section while the array lock is held.
+		 * Not a false alarm, thus continue to use the global lock
+		 * mode. No need for complexmode_enter(), this was done by
+		 * the caller that has set use_global_lock to non-zero.
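/*
 * Simplified userspace model (pthread + C11 atomics standing in for
 * spinlocks and smp_load_acquire(); an editorial sketch, not the kernel
 * code) of the fast/slow path implemented by sem_lock() above. Fields are
 * assumed to be initialized elsewhere.
 */
#include <pthread.h>
#include <stdatomic.h>

#define NSEMS 64

struct array_model {
	pthread_mutex_t global;		/* sem_perm.lock */
	pthread_mutex_t slot[NSEMS];	/* sems[i].lock */
	atomic_int use_global;		/* use_global_lock, with hysteresis */
};

/* returns the slot number, or -1 when the caller holds ->global */
static int lock_one(struct array_model *a, int num)
{
	if (!atomic_load_explicit(&a->use_global, memory_order_relaxed)) {
		pthread_mutex_lock(&a->slot[num]);
		/* pairs with the release store made when leaving global mode */
		if (!atomic_load_explicit(&a->use_global, memory_order_acquire))
			return num;		/* fast path */
		pthread_mutex_unlock(&a->slot[num]);
	}

	pthread_mutex_lock(&a->global);		/* slow path */
	if (!atomic_load_explicit(&a->use_global, memory_order_relaxed)) {
		/* global mode ended while we waited: downgrade to the slot */
		pthread_mutex_lock(&a->slot[num]);
		pthread_mutex_unlock(&a->global);
		return num;
	}
	return -1;	/* complex mode active: keep the global lock */
}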
*/ - lock_array: - ipc_lock_object(&sma->sem_perm); - for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; - spin_unlock_wait(&sem->lock); - } - locknum = -1; + return SEM_GLOBAL_LOCK; } - return locknum; } static inline void sem_unlock(struct sem_array *sma, int locknum) { - if (locknum == -1) { + if (locknum == SEM_GLOBAL_LOCK) { unmerge_queues(sma); + complexmode_tryleave(sma); ipc_unlock_object(&sma->sem_perm); } else { - struct sem *sem = sma->sem_base + locknum; + struct sem *sem = &sma->sems[locknum]; spin_unlock(&sem->lock); } } /* - * sem_lock_(check_) routines are called in the paths where the rw_mutex + * sem_lock_(check_) routines are called in the paths where the rwsem * is not held. * * The caller holds the RCU read lock. */ -static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, - int id, struct sembuf *sops, int nsops, int *locknum) -{ - struct kern_ipc_perm *ipcp; - struct sem_array *sma; - - ipcp = ipc_obtain_object(&sem_ids(ns), id); - if (IS_ERR(ipcp)) - return ERR_CAST(ipcp); - - sma = container_of(ipcp, struct sem_array, sem_perm); - *locknum = sem_lock(sma, sops, nsops); - - /* ipc_rmid() may have already freed the ID while sem_lock - * was spinning: verify that the structure is still valid - */ - if (!ipcp->deleted) - return container_of(ipcp, struct sem_array, sem_perm); - - sem_unlock(sma, *locknum); - return ERR_PTR(-EINVAL); -} - static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -374,12 +499,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns static inline void sem_lock_and_putref(struct sem_array *sma) { sem_lock(sma, NULL, -1); - ipc_rcu_putref(sma); -} - -static inline void sem_putref(struct sem_array *sma) -{ - ipc_rcu_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) @@ -387,54 +507,31 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ipc_rmid(&sem_ids(ns), &s->sem_perm); } -/* - * Lockless wakeup algorithm: - * Without the check/retry algorithm a lockless wakeup is possible: - * - queue.status is initialized to -EINTR before blocking. - * - wakeup is performed by - * * unlinking the queue entry from the pending list - * * setting queue.status to IN_WAKEUP - * This is the notification for the blocked thread that a - * result value is imminent. - * * call wake_up_process - * * set queue.status to the final value. - * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the sem array. - * * otherwise it must acquire the spinlock and check what's up. 
- * - * The two-stage algorithm is necessary to protect against the following - * races: - * - if queue.status is set after wake_up_process, then the woken up idle - * thread could race forward and try (and fail) to acquire sma->lock - * before update_queue had a chance to set queue.status - * - if queue.status is written before wake_up_process and if the - * blocked process is woken up by a signal between writing - * queue.status and the wake_up_process, then the woken up - * process could return from semtimedop and die by calling - * sys_exit before wake_up_process is called. Then wake_up_process - * will oops, because the task structure is already invalid. - * (yes, this happened on s390 with sysv msg). - * - */ -#define IN_WAKEUP 1 +static struct sem_array *sem_alloc(size_t nsems) +{ + struct sem_array *sma; + + if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) + return NULL; + + sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); + if (unlikely(!sma)) + return NULL; + + return sma; +} /** * newary - Create a new semaphore set * @ns: namespace * @params: ptr to the structure that contains key, semflg and nsems * - * Called with sem_ids.rw_mutex held (as a writer) + * Called with sem_ids.rwsem held (as a writer) */ - static int newary(struct ipc_namespace *ns, struct ipc_params *params) { - int id; int retval; struct sem_array *sma; - int size; key_t key = params->key; int nsems = params->u.nsems; int semflg = params->flg; @@ -445,45 +542,42 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof (*sma) + nsems * sizeof (struct sem); - sma = ipc_rcu_alloc(size); - if (!sma) { + sma = sem_alloc(nsems); + if (!sma) return -ENOMEM; - } - memset (sma, 0, size); sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; sma->sem_perm.security = NULL; - retval = security_sem_alloc(sma); + retval = security_sem_alloc(&sma->sem_perm); if (retval) { - ipc_rcu_putref(sma); + kvfree(sma); return retval; } - id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); - if (id < 0) { - security_sem_free(sma); - ipc_rcu_putref(sma); - return id; - } - ns->used_sems += nsems; - - sma->sem_base = (struct sem *) &sma[1]; - for (i = 0; i < nsems; i++) { - INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); - INIT_LIST_HEAD(&sma->sem_base[i].pending_const); - spin_lock_init(&sma->sem_base[i].lock); + INIT_LIST_HEAD(&sma->sems[i].pending_alter); + INIT_LIST_HEAD(&sma->sems[i].pending_const); + spin_lock_init(&sma->sems[i].lock); } sma->complex_count = 0; + sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; INIT_LIST_HEAD(&sma->pending_alter); INIT_LIST_HEAD(&sma->pending_const); INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; - sma->sem_ctime = get_seconds(); + sma->sem_ctime = ktime_get_real_seconds(); + + /* ipc_addid() locks sma upon success. */ + retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); + if (retval < 0) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); + return retval; + } + ns->used_sems += nsems; + sem_unlock(sma, -1); rcu_read_unlock(); @@ -492,21 +586,9 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) /* - * Called with sem_ids.rw_mutex and ipcp locked. - */ -static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) -{ - struct sem_array *sma; - - sma = container_of(ipcp, struct sem_array, sem_perm); - return security_sem_associate(sma, semflg); -} - -/* - * Called with sem_ids.rw_mutex and ipcp locked. 
+ * Called with sem_ids.rwsem and ipcp locked.
  */
-static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
-				  struct ipc_params *params)
+static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 {
 	struct sem_array *sma;
@@ -517,10 +599,14 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 	return 0;
 }
 
-SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+long ksys_semget(key_t key, int nsems, int semflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops sem_ops;
+	static const struct ipc_ops sem_ops = {
+		.getnew = newary,
+		.associate = security_sem_associate,
+		.more_checks = sem_more_checks,
+	};
 	struct ipc_params sem_params;
 
 	ns = current->nsproxy->ipc_ns;
@@ -528,10 +614,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	if (nsems < 0 || nsems > ns->sc_semmsl)
 		return -EINVAL;
 
-	sem_ops.getnew = newary;
-	sem_ops.associate = sem_security;
-	sem_ops.more_checks = sem_more_checks;
-
 	sem_params.key = key;
 	sem_params.flg = semflg;
 	sem_params.u.nsems = nsems;
@@ -539,30 +621,47 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/** perform_atomic_semop - Perform (if possible) a semaphore operation
+SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+{
+	return ksys_semget(key, nsems, semflg);
+}
+
+/**
+ * perform_atomic_semop[_slow] - Attempt to perform semaphore
+ *                               operations on a given array.
 * @sma: semaphore array
- * @sops: array with operations that should be checked
- * @nsems: number of sops
- * @un: undo array
- * @pid: pid that did the change
+ * @q: struct sem_queue that describes the operation
+ *
+ * Caller blocking is as follows, based on the value
+ * indicated by the semaphore operation (sem_op):
+ *
+ * (1) >0 never blocks.
+ * (2) 0 (wait-for-zero operation): semval is non-zero.
+ * (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
- * Negative values are error codes.
+ * Returns <0 for error codes.
 */
-
-static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
-			     int nsops, struct sem_undo *un, int pid)
+static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 {
-	int result, sem_op;
+	int result, sem_op, nsops;
+	struct pid *pid;
 	struct sembuf *sop;
-	struct sem * curr;
+	struct sem *curr;
+	struct sembuf *sops;
+	struct sem_undo *un;
+
+	sops = q->sops;
+	nsops = q->nsops;
+	un = q->undo;
 
 	for (sop = sops; sop < sops + nsops; sop++) {
-		curr = sma->sem_base + sop->sem_num;
+		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
+		curr = &sma->sems[idx];
 		sem_op = sop->sem_op;
 		result = curr->semval;
-
+
 		if (!sem_op && result)
 			goto would_block;
 
@@ -571,25 +670,25 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			goto would_block;
 		if (result > SEMVMX)
 			goto out_of_range;
+
 		if (sop->sem_flg & SEM_UNDO) {
 			int undo = un->semadj[sop->sem_num] - sem_op;
-			/*
-			 * Exceeding the undo range is an error.
-			 */
+			/* Exceeding the undo range is an error.
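/*
 * Userspace sketch (not part of this diff) of the semadj bookkeeping
 * handled above: an adjustment recorded via SEM_UNDO is replayed when the
 * process exits (see exit_sem()), so the child's increment does not
 * survive it.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	if (id < 0)
		return 1;
	if (fork() == 0) {
		semop(id, &up, 1);	/* semval 0 -> 1, semadj -1 noted */
		_exit(0);		/* exit_sem() reverts: 1 -> 0 */
	}
	wait(NULL);
	printf("semval after child exit: %d\n", semctl(id, 0, GETVAL));
	return semctl(id, 0, IPC_RMID) < 0;
}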
*/ if (undo < (-SEMAEM - 1) || undo > SEMAEM) goto out_of_range; + un->semadj[sop->sem_num] = undo; } + curr->semval = result; } sop--; + pid = q->pid; while (sop >= sops) { - sma->sem_base[sop->sem_num].sempid = pid; - if (sop->sem_flg & SEM_UNDO) - un->semadj[sop->sem_num] -= sop->sem_op; + ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid); sop--; } - + return 0; out_of_range: @@ -597,6 +696,8 @@ out_of_range: goto undo; would_block: + q->blocking = sop; + if (sop->sem_flg & IPC_NOWAIT) result = -EAGAIN; else @@ -605,58 +706,94 @@ would_block: undo: sop--; while (sop >= sops) { - sma->sem_base[sop->sem_num].semval -= sop->sem_op; + sem_op = sop->sem_op; + sma->sems[sop->sem_num].semval -= sem_op; + if (sop->sem_flg & SEM_UNDO) + un->semadj[sop->sem_num] += sem_op; sop--; } return result; } -/** wake_up_sem_queue_prepare(q, error): Prepare wake-up - * @q: queue entry that must be signaled - * @error: Error value for the signal - * - * Prepare the wake-up of the queue entry q. - */ -static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) +static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) { - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the - * wakee busy-wait until we're scheduled back on. - */ - preempt_disable(); + int result, sem_op, nsops; + struct sembuf *sop; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; + + if (unlikely(q->dupsop)) + return perform_atomic_semop_slow(sma, q); + + /* + * We scan the semaphore set twice, first to ensure that the entire + * operation can succeed, therefore avoiding any pointless writes + * to shared memory and having to undo such changes in order to block + * until the operations can go through. + */ + for (sop = sops; sop < sops + nsops; sop++) { + int idx = array_index_nospec(sop->sem_num, sma->sem_nsems); + + curr = &sma->sems[idx]; + sem_op = sop->sem_op; + result = curr->semval; + + if (!sem_op && result) + goto would_block; /* wait-for-zero */ + + result += sem_op; + if (result < 0) + goto would_block; + + if (result > SEMVMX) + return -ERANGE; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + /* Exceeding the undo range is an error. */ + if (undo < (-SEMAEM - 1) || undo > SEMAEM) + return -ERANGE; + } } - q->status = IN_WAKEUP; - q->pid = error; - list_add_tail(&q->list, pt); + for (sop = sops; sop < sops + nsops; sop++) { + curr = &sma->sems[sop->sem_num]; + sem_op = sop->sem_op; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + un->semadj[sop->sem_num] = undo; + } + curr->semval += sem_op; + ipc_update_pid(&curr->sempid, q->pid); + } + + return 0; + +would_block: + q->blocking = sop; + return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1; } -/** - * wake_up_sem_queue_do(pt) - do the actual wake-up - * @pt: list of tasks to be woken up - * - * Do the actual wake-up. - * The function is called without any locks held, thus the semaphore array - * could be destroyed already and the tasks can disappear as soon as the - * status is set to the actual return code. 
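/*
 * Minimal sketch (editorial, not the kernel code) of the two-pass scheme
 * perform_atomic_semop() uses for duplicate-free sops: pass 1 only
 * validates, pass 2 commits, so a blocked or failing operation never
 * dirties the array and nothing needs undoing. With duplicate sem_nums
 * pass 1 would read stale values, which is why dupsop falls back to the
 * _slow variant above.
 */
#include <sys/sem.h>

static int apply_atomically(int *semval, const struct sembuf *sops, int nsops)
{
	int i;

	for (i = 0; i < nsops; i++) {	/* pass 1: would every op succeed? */
		int v = semval[sops[i].sem_num];

		if (!sops[i].sem_op && v)
			return 1;	/* wait-for-zero must sleep */
		if (v + sops[i].sem_op < 0)
			return 1;	/* decrement must sleep */
		/* range checks such as SEMVMX are omitted here */
	}
	for (i = 0; i < nsops; i++)	/* pass 2: commit */
		semval[sops[i].sem_num] += sops[i].sem_op;
	return 0;
}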
- */
-static void wake_up_sem_queue_do(struct list_head *pt)
+static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
+					     struct wake_q_head *wake_q)
 {
-	struct sem_queue *q, *t;
-	int did_something;
+	struct task_struct *sleeper;
 
-	did_something = !list_empty(pt);
-	list_for_each_entry_safe(q, t, pt, list) {
-		wake_up_process(q->sleeper);
-		/* q can disappear immediately after writing q->status. */
-		smp_wmb();
-		q->status = q->pid;
-	}
-	if (did_something)
-		preempt_enable();
+	sleeper = get_task_struct(q->sleeper);
+
+	/* see SEM_BARRIER_2 for purpose/pairing */
+	smp_store_release(&q->status, error);
+
+	wake_q_add_safe(wake_q, sleeper);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -676,7 +813,7 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
  * modified the array.
  * Note that wait-for-zero operations are handled without restart.
  */
-static int check_restart(struct sem_array *sma, struct sem_queue *q)
+static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 {
 	/* pending complex alter operations are too difficult to analyse */
 	if (!list_empty(&sma->pending_alter))
@@ -688,7 +825,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
 	/* It is impossible that someone waits for the new value:
 	 * - complex operations always restart.
-	 * - wait-for-zero are handled seperately.
+	 * - wait-for-zero are handled separately.
 	 * - q is a previously sleeping simple operation that
 	 *   altered the array. It must be a decrement, because
 	 *   simple increments never sleep.
@@ -701,69 +838,60 @@
 }
 
 /**
- * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
+ * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
- * @pt: list head for the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
- * The tasks that must be woken up are added to @pt. The return code
+ * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
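The waiters completed here were created by sem_op == 0 requests; a hedged userspace sketch of such a wait-for-zero operation (error handling omitted):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int wait_for_zero(int semid, unsigned short semnum)
{
	struct sembuf op = {
		.sem_num = semnum,
		.sem_op  = 0,	/* block until semval reaches zero */
		.sem_flg = 0,	/* or add IPC_NOWAIT to fail with EAGAIN */
	};

	return semop(semid, &op, 1);
}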
*/ static int wake_const_ops(struct sem_array *sma, int semnum, - struct list_head *pt) + struct wake_q_head *wake_q) { - struct sem_queue *q; - struct list_head *walk; + struct sem_queue *q, *tmp; struct list_head *pending_list; int semop_completed = 0; if (semnum == -1) pending_list = &sma->pending_const; else - pending_list = &sma->sem_base[semnum].pending_const; - - walk = pending_list->next; - while (walk != pending_list) { - int error; + pending_list = &sma->sems[semnum].pending_const; - q = container_of(walk, struct sem_queue, list); - walk = walk->next; + list_for_each_entry_safe(q, tmp, pending_list, list) { + int error = perform_atomic_semop(sma, q); - error = perform_atomic_semop(sma, q->sops, q->nsops, - q->undo, q->pid); - - if (error <= 0) { - /* operation completed, remove from queue & wakeup */ - - unlink_queue(sma, q); + if (error > 0) + continue; + /* operation completed, remove from queue & wakeup */ + unlink_queue(sma, q); - wake_up_sem_queue_prepare(pt, q, error); - if (error == 0) - semop_completed = 1; - } + wake_up_sem_queue_prepare(q, error, wake_q); + if (error == 0) + semop_completed = 1; } + return semop_completed; } /** - * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks + * do_smart_wakeup_zero - wakeup all wait for zero tasks * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations - * @pt: list head of the tasks that must be woken up. + * @wake_q: lockless wake-queue head * - * do_smart_wakeup_zero() checks all required queue for wait-for-zero - * operations, based on the actual changes that were performed on the - * semaphore array. + * Checks all required queue for wait-for-zero operations, based + * on the actual changes that were performed on the semaphore array. * The function returns 1 if at least one operation was completed successfully. */ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, - int nsops, struct list_head *pt) + int nsops, struct wake_q_head *wake_q) { int i; int semop_completed = 0; @@ -774,9 +902,9 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, for (i = 0; i < nsops; i++) { int num = sops[i].sem_num; - if (sma->sem_base[num].semval == 0) { + if (sma->sems[num].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, num, pt); + semop_completed |= wake_const_ops(sma, num, wake_q); } } } else { @@ -785,9 +913,9 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * Assume all were changed. */ for (i = 0; i < sma->sem_nsems; i++) { - if (sma->sem_base[i].semval == 0) { + if (sma->sems[i].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, i, pt); + semop_completed |= wake_const_ops(sma, i, wake_q); } } } @@ -796,48 +924,43 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * then check the global queue, too. */ if (got_zero) - semop_completed |= wake_const_ops(sma, -1, pt); + semop_completed |= wake_const_ops(sma, -1, wake_q); return semop_completed; } /** - * update_queue(sma, semnum): Look for tasks that can be completed. + * update_queue - look for tasks that can be completed. * @sma: semaphore array. * @semnum: semaphore that was modified. - * @pt: list head for the tasks that must be woken up. + * @wake_q: lockless wake-queue head. * * update_queue must be called after a semaphore in a semaphore array * was modified. 
If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
- * The tasks that must be woken up are added to @pt. The return code
+ * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
-static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
+static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 {
-	struct sem_queue *q;
-	struct list_head *walk;
+	struct sem_queue *q, *tmp;
 	struct list_head *pending_list;
 	int semop_completed = 0;
 
 	if (semnum == -1)
 		pending_list = &sma->pending_alter;
 	else
-		pending_list = &sma->sem_base[semnum].pending_alter;
+		pending_list = &sma->sems[semnum].pending_alter;
 
 again:
-	walk = pending_list->next;
-	while (walk != pending_list) {
+	list_for_each_entry_safe(q, tmp, pending_list, list) {
 		int error, restart;
 
-		q = container_of(walk, struct sem_queue, list);
-		walk = walk->next;
-
 		/* If we are scanning the single sop, per-semaphore list of
 		 * one semaphore and that semaphore is 0, then it is not
 		 * necessary to scan further: simple increments
@@ -845,11 +968,10 @@ again:
 		 * be in the per semaphore pending queue, and decrements
 		 * cannot be successful if the value is already 0.
 		 */
-		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
+		if (semnum != -1 && sma->sems[semnum].semval == 0)
 			break;
 
-		error = perform_atomic_semop(sma, q->sops, q->nsops,
-					 q->undo, q->pid);
+		error = perform_atomic_semop(sma, q);
 
 		/* Does q->sleeper still need to sleep? */
 		if (error > 0)
@@ -861,11 +983,11 @@ again:
 			restart = 0;
 		} else {
 			semop_completed = 1;
-			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
+			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 			restart = check_restart(sma, q);
 		}
 
-		wake_up_sem_queue_prepare(pt, q, error);
+		wake_up_sem_queue_prepare(q, error, wake_q);
 		if (restart)
 			goto again;
 	}
@@ -873,29 +995,47 @@ again:
 }
 
 /**
- * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * set_semotime - set sem_otime
+ * @sma: semaphore array
+ * @sops: operations that modified the array, may be NULL
+ *
+ * sem_otime is replicated to avoid cache line thrashing.
+ * This function sets one instance to the current time.
+ */
+static void set_semotime(struct sem_array *sma, struct sembuf *sops)
+{
+	if (sops == NULL) {
+		sma->sems[0].sem_otime = ktime_get_real_seconds();
+	} else {
+		sma->sems[sops[0].sem_num].sem_otime =
+						ktime_get_real_seconds();
+	}
+}
+
+/**
+ * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
- * @pt: list head of the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
- * responsible for calling wake_up_sem_queue_do(@pt).
+ * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
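One userspace path that reaches do_smart_update() with sops == NULL is semctl(SETALL), which rewrites every semval and forces a rescan of all pending queues. A sketch of that call (assumes the caller defines union semun, as SUSv3 requires; error handling omitted):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {			/* the caller must define this union */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

static int set_all(int semid, unsigned short *vals)
{
	union semun arg = { .array = vals };

	return semctl(semid, 0, SETALL, arg);
}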
*/ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, - int otime, struct list_head *pt) + int otime, struct wake_q_head *wake_q) { int i; - otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); + otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q); if (!list_empty(&sma->pending_alter)) { /* semaphore array uses the global queue - just process it. */ - otime |= update_queue(sma, -1, pt); + otime |= update_queue(sma, -1, wake_q); } else { if (!sops) { /* @@ -903,99 +1043,101 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop * known. Check all. */ for (i = 0; i < sma->sem_nsems; i++) - otime |= update_queue(sma, i, pt); + otime |= update_queue(sma, i, wake_q); } else { /* * Check the semaphores that were increased: * - No complex ops, thus all sleeping ops are * decrease. * - if we decreased the value, then any sleeping - * semaphore ops wont be able to run: If the + * semaphore ops won't be able to run: If the * previous value was too small, then the new * value will be too small, too. */ for (i = 0; i < nsops; i++) { if (sops[i].sem_op > 0) { otime |= update_queue(sma, - sops[i].sem_num, pt); + sops[i].sem_num, wake_q); } } } } - if (otime) { - if (sops == NULL) { - sma->sem_base[0].sem_otime = get_seconds(); - } else { - sma->sem_base[sops[0].sem_num].sem_otime = - get_seconds(); - } - } + if (otime) + set_semotime(sma, sops); } +/* + * check_qop: Test if a queued operation sleeps on the semaphore semnum + */ +static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, + bool count_zero) +{ + struct sembuf *sop = q->blocking; + + /* + * Linux always (since 0.99.10) reported a task as sleeping on all + * semaphores. This violates SUS, therefore it was changed to the + * standard compliant behavior. + * Give the administrators a chance to notice that an application + * might misbehave because it relies on the Linux behavior. + */ + pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n" + "The task %s (%d) triggered the difference, watch for misbehavior.\n", + current->comm, task_pid_nr(current)); + + if (sop->sem_num != semnum) + return 0; + + if (count_zero && sop->sem_op == 0) + return 1; + if (!count_zero && sop->sem_op < 0) + return 1; + + return 0; +} /* The following counts are associated to each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero - * This model assumes that a task waits on exactly one semaphore. - * Since semaphore operations are to be performed atomically, tasks actually - * wait on a whole sequence of semaphores simultaneously. - * The counts we return here are a rough approximation, but still - * warrant that semncnt+semzcnt>0 if the task is on the pending queue. + * + * Per definition, a task waits only on the semaphore of the first semop + * that cannot proceed, even if additional operation would block, too. */ -static int count_semncnt (struct sem_array * sma, ushort semnum) +static int count_semcnt(struct sem_array *sma, ushort semnum, + bool count_zero) { - int semncnt; - struct sem_queue * q; + struct list_head *l; + struct sem_queue *q; + int semcnt; - semncnt = 0; - list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { - struct sembuf * sops = q->sops; - BUG_ON(sops->sem_num != semnum); - if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) - semncnt++; - } + semcnt = 0; + /* First: check the simple operations. 
They are easy to evaluate */
+	if (count_zero)
+		l = &sma->sems[semnum].pending_const;
+	else
+		l = &sma->sems[semnum].pending_alter;
 
-	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op < 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semncnt++;
+	list_for_each_entry(q, l, list) {
+		/* all tasks on a per-semaphore list sleep on exactly
+		 * that semaphore
+		 */
+		semcnt++;
 	}
-	return semncnt;
-}
-
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
-{
-	int semzcnt;
-	struct sem_queue *q;
 
-	semzcnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-		struct sembuf *sops = q->sops;
-		BUG_ON(sops->sem_num != semnum);
-		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
-			semzcnt++;
+	/* Then: check the complex operations. */
+	list_for_each_entry(q, &sma->pending_alter, list) {
+		semcnt += check_qop(sma, semnum, q, count_zero);
 	}
-
-	list_for_each_entry(q, &sma->pending_const, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op == 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semzcnt++;
+	if (count_zero) {
+		list_for_each_entry(q, &sma->pending_const, list) {
+			semcnt += check_qop(sma, semnum, q, count_zero);
+		}
 	}
-	return semzcnt;
+	return semcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
@@ -1003,8 +1145,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	struct sem_undo *un, *tu;
 	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
-	struct list_head tasks;
 	int i;
+	DEFINE_WAKE_Q(wake_q);
 
 	/* Free the existing undo structures for this semaphore set. */
 	ipc_assert_locked_object(&sma->sem_perm);
@@ -1014,30 +1156,30 @@
 		un->semid = -1;
 		list_del_rcu(&un->list_proc);
 		spin_unlock(&un->ulp->lock);
-		kfree_rcu(un, rcu);
+		kvfree_rcu(un, rcu);
 	}
 
 	/* Wake up all pending processes and let them fail with EIDRM.
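From userspace that failure is visible as semop() returning -1 with errno set to EIDRM; a minimal sketch of the waiting side (illustrative only, not part of this patch):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static void wait_on(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };

	if (semop(semid, &op, 1) == -1 && errno == EIDRM)
		fprintf(stderr, "semaphore set removed while waiting\n");
}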
*/ - INIT_LIST_HEAD(&tasks); list_for_each_entry_safe(q, tq, &sma->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_for_each_entry_safe(q, tq, &sem->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } + ipc_update_pid(&sem->sempid, NULL); } /* Remove the semaphore set from the IDR */ @@ -1045,15 +1187,14 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); ns->used_sems -= sma->sem_nsems; - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -1075,14 +1216,14 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } -static time_t get_semotime(struct sem_array *sma) +static time64_t get_semotime(struct sem_array *sma) { int i; - time_t res; + time64_t res; - res = sma->sem_base[0].sem_otime; + res = sma->sems[0].sem_otime; for (i = 1; i < sma->sem_nsems; i++) { - time_t to = sma->sem_base[i].sem_otime; + time64_t to = sma->sems[i].sem_otime; if (to > res) res = to; @@ -1090,117 +1231,127 @@ static time_t get_semotime(struct sem_array *sma) return res; } -static int semctl_nolock(struct ipc_namespace *ns, int semid, - int cmd, int version, void __user *p) +static int semctl_stat(struct ipc_namespace *ns, int semid, + int cmd, struct semid64_ds *semid64) { - int err; struct sem_array *sma; + time64_t semotime; + int err; - switch(cmd) { - case IPC_INFO: - case SEM_INFO: - { - struct seminfo seminfo; - int max_id; + memset(semid64, 0, sizeof(*semid64)); - err = security_sem_semctl(NULL, cmd); - if (err) - return err; - - memset(&seminfo,0,sizeof(seminfo)); - seminfo.semmni = ns->sc_semmni; - seminfo.semmns = ns->sc_semmns; - seminfo.semmsl = ns->sc_semmsl; - seminfo.semopm = ns->sc_semopm; - seminfo.semvmx = SEMVMX; - seminfo.semmnu = SEMMNU; - seminfo.semmap = SEMMAP; - seminfo.semume = SEMUME; - down_read(&sem_ids(ns).rw_mutex); - if (cmd == SEM_INFO) { - seminfo.semusz = sem_ids(ns).in_use; - seminfo.semaem = ns->used_sems; - } else { - seminfo.semusz = SEMUSZ; - seminfo.semaem = SEMAEM; + rcu_read_lock(); + if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) { + sma = sem_obtain_object(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; } - max_id = ipc_get_maxid(&sem_ids(ns)); - up_read(&sem_ids(ns).rw_mutex); - if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) - return -EFAULT; - return (max_id < 0) ? 
0: max_id; - } - case IPC_STAT: - case SEM_STAT: - { - struct semid64_ds tbuf; - int id = 0; - - memset(&tbuf, 0, sizeof(tbuf)); - - rcu_read_lock(); - if (cmd == SEM_STAT) { - sma = sem_obtain_object(ns, semid); - if (IS_ERR(sma)) { - err = PTR_ERR(sma); - goto out_unlock; - } - id = sma->sem_perm.id; - } else { - sma = sem_obtain_object_check(ns, semid); - if (IS_ERR(sma)) { - err = PTR_ERR(sma); - goto out_unlock; - } + } else { /* IPC_STAT */ + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; } + } + /* see comment for SHM_STAT_ANY */ + if (cmd == SEM_STAT_ANY) + audit_ipc_obj(&sma->sem_perm); + else { err = -EACCES; if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) goto out_unlock; + } - err = security_sem_semctl(sma, cmd); - if (err) - goto out_unlock; + err = security_sem_semctl(&sma->sem_perm, cmd); + if (err) + goto out_unlock; - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = get_semotime(sma); - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - rcu_read_unlock(); - if (copy_semid_to_user(p, &tbuf, version)) - return -EFAULT; - return id; + ipc_lock_object(&sma->sem_perm); + + if (!ipc_valid_object(&sma->sem_perm)) { + ipc_unlock_object(&sma->sem_perm); + err = -EIDRM; + goto out_unlock; } - default: - return -EINVAL; + + kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm); + semotime = get_semotime(sma); + semid64->sem_otime = semotime; + semid64->sem_ctime = sma->sem_ctime; +#ifndef CONFIG_64BIT + semid64->sem_otime_high = semotime >> 32; + semid64->sem_ctime_high = sma->sem_ctime >> 32; +#endif + semid64->sem_nsems = sma->sem_nsems; + + if (cmd == IPC_STAT) { + /* + * As defined in SUS: + * Return 0 on success + */ + err = 0; + } else { + /* + * SEM_STAT and SEM_STAT_ANY (both Linux specific) + * Return the full id, including the sequence number + */ + err = sma->sem_perm.id; } + ipc_unlock_object(&sma->sem_perm); out_unlock: rcu_read_unlock(); return err; } +static int semctl_info(struct ipc_namespace *ns, int semid, + int cmd, void __user *p) +{ + struct seminfo seminfo; + int max_idx; + int err; + + err = security_sem_semctl(NULL, cmd); + if (err) + return err; + + memset(&seminfo, 0, sizeof(seminfo)); + seminfo.semmni = ns->sc_semmni; + seminfo.semmns = ns->sc_semmns; + seminfo.semmsl = ns->sc_semmsl; + seminfo.semopm = ns->sc_semopm; + seminfo.semvmx = SEMVMX; + seminfo.semmnu = SEMMNU; + seminfo.semmap = SEMMAP; + seminfo.semume = SEMUME; + down_read(&sem_ids(ns).rwsem); + if (cmd == SEM_INFO) { + seminfo.semusz = sem_ids(ns).in_use; + seminfo.semaem = ns->used_sems; + } else { + seminfo.semusz = SEMUSZ; + seminfo.semaem = SEMAEM; + } + max_idx = ipc_get_maxidx(&sem_ids(ns)); + up_read(&sem_ids(ns).rwsem); + if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) + return -EFAULT; + return (max_idx < 0) ? 
0 : max_idx; +} + static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, - unsigned long arg) + int val) { struct sem_undo *un; struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err; - struct list_head tasks; - int val; -#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) - /* big-endian 64bit */ - val = arg >> 32; -#else - /* 32bit or little-endian 64bit */ - val = arg; -#endif + DEFINE_WAKE_Q(wake_q); if (val > SEMVMX || val < 0) return -ERANGE; - INIT_LIST_HEAD(&tasks); - rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); if (IS_ERR(sma)) { @@ -1219,7 +1370,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, return -EACCES; } - err = security_sem_semctl(sma, SETVAL); + err = security_sem_semctl(&sma->sem_perm, SETVAL); if (err) { rcu_read_unlock(); return -EACCES; @@ -1227,20 +1378,27 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, sem_lock(sma, NULL, -1); - curr = &sma->sem_base[semnum]; + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + return -EIDRM; + } + + semnum = array_index_nospec(semnum, sma->sem_nsems); + curr = &sma->sems[semnum]; ipc_assert_locked_object(&sma->sem_perm); list_for_each_entry(un, &sma->list_id, list_id) un->semadj[semnum] = 0; curr->semval = val; - curr->sempid = task_tgid_vnr(current); - sma->sem_ctime = get_seconds(); + ipc_update_pid(&curr->sempid, task_tgid(current)); + sma->sem_ctime = ktime_get_real_seconds(); /* maybe some queued-up processes were waiting for this */ - do_smart_update(sma, NULL, 0, 0, &tasks); + do_smart_update(sma, NULL, 0, 0, &wake_q); sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); return 0; } @@ -1248,13 +1406,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int cmd, void __user *p) { struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err, nsems; ushort fast_sem_io[SEMMSL_FAST]; - ushort* sem_io = fast_sem_io; - struct list_head tasks; - - INIT_LIST_HEAD(&tasks); + ushort *sem_io = fast_sem_io; + DEFINE_WAKE_Q(wake_q); rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); @@ -1269,11 +1425,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? 
S_IWUGO : S_IRUGO)) goto out_rcu_wakeup; - err = security_sem_semctl(sma, cmd); + err = security_sem_semctl(&sma->sem_perm, cmd); if (err) goto out_rcu_wakeup; - err = -EACCES; switch (cmd) { case GETALL: { @@ -1281,36 +1436,37 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; sem_lock(sma, NULL, -1); - if(nsems > SEMMSL_FAST) { - if (!ipc_rcu_getref(sma)) { - sem_unlock(sma, -1); - rcu_read_unlock(); + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } + if (nsems > SEMMSL_FAST) { + if (!ipc_rcu_getref(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } sem_unlock(sma, -1); rcu_read_unlock(); - sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - sem_putref(sma); + sem_io = kvmalloc_array(nsems, sizeof(ushort), + GFP_KERNEL); + if (sem_io == NULL) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; } rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma, -1); - rcu_read_unlock(); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } } for (i = 0; i < sma->sem_nsems; i++) - sem_io[i] = sma->sem_base[i].semval; + sem_io[i] = sma->sems[i].semval; sem_unlock(sma, -1); rcu_read_unlock(); err = 0; - if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) + if (copy_to_user(array, sem_io, nsems*sizeof(ushort))) err = -EFAULT; goto out_free; } @@ -1319,53 +1475,54 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; struct sem_undo *un; - if (!ipc_rcu_getref(sma)) { - rcu_read_unlock(); - return -EIDRM; + if (!ipc_rcu_getref(&sma->sem_perm)) { + err = -EIDRM; + goto out_rcu_wakeup; } rcu_read_unlock(); - if(nsems > SEMMSL_FAST) { - sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - sem_putref(sma); + if (nsems > SEMMSL_FAST) { + sem_io = kvmalloc_array(nsems, sizeof(ushort), + GFP_KERNEL); + if (sem_io == NULL) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; } } - if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { - sem_putref(sma); + if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - sem_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); err = -ERANGE; goto out_free; } } rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma, -1); - rcu_read_unlock(); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } - for (i = 0; i < nsems; i++) - sma->sem_base[i].semval = sem_io[i]; + for (i = 0; i < nsems; i++) { + sma->sems[i].semval = sem_io[i]; + ipc_update_pid(&sma->sems[i].sempid, task_tgid(current)); + } ipc_assert_locked_object(&sma->sem_perm); list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; } - sma->sem_ctime = get_seconds(); + sma->sem_ctime = ktime_get_real_seconds(); /* maybe some queued-up processes were waiting for this */ - do_smart_update(sma, NULL, 0, 0, &tasks); + do_smart_update(sma, NULL, 0, 0, &wake_q); err = 0; goto out_unlock; } @@ -1376,20 +1533,26 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, goto out_rcu_wakeup; sem_lock(sma, NULL, -1); - curr = &sma->sem_base[semnum]; + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } + + semnum = array_index_nospec(semnum, nsems); + curr = &sma->sems[semnum]; switch 
(cmd) { case GETVAL: err = curr->semval; goto out_unlock; case GETPID: - err = curr->sempid; + err = pid_vnr(curr->sempid); goto out_unlock; case GETNCNT: - err = count_semncnt(sma,semnum); + err = count_semcnt(sma, semnum, 0); goto out_unlock; case GETZCNT: - err = count_semzcnt(sma,semnum); + err = count_semcnt(sma, semnum, 1); goto out_unlock; } @@ -1397,17 +1560,17 @@ out_unlock: sem_unlock(sma, -1); out_rcu_wakeup: rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); out_free: - if(sem_io != fast_sem_io) - ipc_free(sem_io, sizeof(ushort)*nsems); + if (sem_io != fast_sem_io) + kvfree(sem_io); return err; } static inline unsigned long copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -1416,7 +1579,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { struct semid_ds tbuf_old; - if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->sem_perm.uid = tbuf_old.sem_perm.uid; @@ -1431,28 +1594,22 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) } /* - * This function handles some semctl commands which require the rw_mutex + * This function handles some semctl commands which require the rwsem * to be held in write mode. - * NOTE: no locks must be held, the rw_mutex is taken inside this function. + * NOTE: no locks must be held, the rwsem is taken inside this function. */ static int semctl_down(struct ipc_namespace *ns, int semid, - int cmd, int version, void __user *p) + int cmd, struct semid64_ds *semid64) { struct sem_array *sma; int err; - struct semid64_ds semid64; struct kern_ipc_perm *ipcp; - if(cmd == IPC_SET) { - if (copy_semid_from_user(&semid64, p, version)) - return -EFAULT; - } - - down_write(&sem_ids(ns).rw_mutex); + down_write(&sem_ids(ns).rwsem); rcu_read_lock(); - ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, - &semid64.sem_perm, 0); + ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd, + &semid64->sem_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; @@ -1460,7 +1617,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, sma = container_of(ipcp, struct sem_array, sem_perm); - err = security_sem_semctl(sma, cmd); + err = security_sem_semctl(&sma->sem_perm, cmd); if (err) goto out_unlock1; @@ -1472,10 +1629,10 @@ static int semctl_down(struct ipc_namespace *ns, int semid, goto out_up; case IPC_SET: sem_lock(sma, NULL, -1); - err = ipc_update_perm(&semid64.sem_perm, ipcp); + err = ipc_update_perm(&semid64->sem_perm, ipcp); if (err) goto out_unlock0; - sma->sem_ctime = get_seconds(); + sma->sem_ctime = ktime_get_real_seconds(); break; default: err = -EINVAL; @@ -1487,28 +1644,35 @@ out_unlock0: out_unlock1: rcu_read_unlock(); out_up: - up_write(&sem_ids(ns).rw_mutex); + up_write(&sem_ids(ns).rwsem); return err; } -SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) +static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version) { - int version; struct ipc_namespace *ns; void __user *p = (void __user *)arg; + struct semid64_ds semid64; + int err; if (semid < 0) return -EINVAL; - version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: + return semctl_info(ns, semid, cmd, p); case IPC_STAT: case SEM_STAT: - return 
semctl_nolock(ns, semid, cmd, version, p); + case SEM_STAT_ANY: + err = semctl_stat(ns, semid, cmd, &semid64); + if (err < 0) + return err; + if (copy_semid_to_user(p, &semid64, version)) + err = -EFAULT; + return err; case GETALL: case GETVAL: case GETPID: @@ -1516,16 +1680,162 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) case GETZCNT: case SETALL: return semctl_main(ns, semid, semnum, cmd, p); + case SETVAL: { + int val; +#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) + /* big-endian 64bit */ + val = arg >> 32; +#else + /* 32bit or little-endian 64bit */ + val = arg; +#endif + return semctl_setval(ns, semid, semnum, val); + } + case IPC_SET: + if (copy_semid_from_user(&semid64, p, version)) + return -EFAULT; + fallthrough; + case IPC_RMID: + return semctl_down(ns, semid, cmd, &semid64); + default: + return -EINVAL; + } +} + +SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) +{ + return ksys_semctl(semid, semnum, cmd, arg, IPC_64); +} + +#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg) +{ + int version = ipc_parse_version(&cmd); + + return ksys_semctl(semid, semnum, cmd, arg, version); +} + +SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) +{ + return ksys_old_semctl(semid, semnum, cmd, arg); +} +#endif + +#ifdef CONFIG_COMPAT + +struct compat_semid_ds { + struct compat_ipc_perm sem_perm; + old_time32_t sem_otime; + old_time32_t sem_ctime; + compat_uptr_t sem_base; + compat_uptr_t sem_pending; + compat_uptr_t sem_pending_last; + compat_uptr_t undo; + unsigned short sem_nsems; +}; + +static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf, + int version) +{ + memset(out, 0, sizeof(*out)); + if (version == IPC_64) { + struct compat_semid64_ds __user *p = buf; + return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm); + } else { + struct compat_semid_ds __user *p = buf; + return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm); + } +} + +static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in, + int version) +{ + if (version == IPC_64) { + struct compat_semid64_ds v; + memset(&v, 0, sizeof(v)); + to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm); + v.sem_otime = lower_32_bits(in->sem_otime); + v.sem_otime_high = upper_32_bits(in->sem_otime); + v.sem_ctime = lower_32_bits(in->sem_ctime); + v.sem_ctime_high = upper_32_bits(in->sem_ctime); + v.sem_nsems = in->sem_nsems; + return copy_to_user(buf, &v, sizeof(v)); + } else { + struct compat_semid_ds v; + memset(&v, 0, sizeof(v)); + to_compat_ipc_perm(&v.sem_perm, &in->sem_perm); + v.sem_otime = in->sem_otime; + v.sem_ctime = in->sem_ctime; + v.sem_nsems = in->sem_nsems; + return copy_to_user(buf, &v, sizeof(v)); + } +} + +static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version) +{ + void __user *p = compat_ptr(arg); + struct ipc_namespace *ns; + struct semid64_ds semid64; + int err; + + ns = current->nsproxy->ipc_ns; + + if (semid < 0) + return -EINVAL; + + switch (cmd & (~IPC_64)) { + case IPC_INFO: + case SEM_INFO: + return semctl_info(ns, semid, cmd, p); + case IPC_STAT: + case SEM_STAT: + case SEM_STAT_ANY: + err = semctl_stat(ns, semid, cmd, &semid64); + if (err < 0) + return err; + if (copy_compat_semid_to_user(p, &semid64, version)) + err = -EFAULT; + return err; + case GETVAL: + case GETPID: + case GETNCNT: + case GETZCNT: + case GETALL: + case SETALL: + return semctl_main(ns, semid, semnum, cmd, 
p); case SETVAL: return semctl_setval(ns, semid, semnum, arg); - case IPC_RMID: case IPC_SET: - return semctl_down(ns, semid, cmd, version, p); + if (copy_compat_semid_from_user(&semid64, p, version)) + return -EFAULT; + fallthrough; + case IPC_RMID: + return semctl_down(ns, semid, cmd, &semid64); default: return -EINVAL; } } +COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg) +{ + return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64); +} + +#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION +long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg) +{ + int version = compat_ipc_parse_version(&cmd); + + return compat_ksys_semctl(semid, semnum, cmd, arg, version); +} + +COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg) +{ + return compat_ksys_old_semctl(semid, semnum, cmd, arg); +} +#endif +#endif + /* If the task doesn't already have a undo_list, then allocate one * here. We guarantee there is only one thread using this undo list, * and current is THE ONE @@ -1543,11 +1853,11 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) undo_list = current->sysvsem.undo_list; if (!undo_list) { - undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); + undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT); if (undo_list == NULL) return -ENOMEM; spin_lock_init(&undo_list->lock); - atomic_set(&undo_list->refcnt, 1); + refcount_set(&undo_list->refcnt, 1); INIT_LIST_HEAD(&undo_list->list_proc); current->sysvsem.undo_list = undo_list; @@ -1560,7 +1870,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) { struct sem_undo *un; - list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { + list_for_each_entry_rcu(un, &ulp->list_proc, list_proc, + spin_is_locked(&ulp->lock)) { if (un->semid == semid) return un; } @@ -1571,7 +1882,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { struct sem_undo *un; - assert_spin_locked(&ulp->lock); + assert_spin_locked(&ulp->lock); un = __lookup_undo(ulp, semid); if (un) { @@ -1582,7 +1893,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) } /** - * find_alloc_undo - Lookup (and if not present create) undo array + * find_alloc_undo - lookup (and if not present create) undo array * @ns: namespace * @semid: semaphore array id * @@ -1607,7 +1918,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); spin_unlock(&ulp->lock); - if (likely(un!=NULL)) + if (likely(un != NULL)) goto out; /* no undo structure around - allocate one. 
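The allocation sequence that follows uses a shape that recurs throughout this file: pin the object, drop the locks to allocate (the allocation may sleep), then relock and revalidate against a racing IPC_RMID. Its skeleton, paraphrased from the code below (condensed and not stand-alone; identifiers as in this file):

	if (!ipc_rcu_getref(&sma->sem_perm))	/* pin the array */
		return ERR_PTR(-EIDRM);
	rcu_read_unlock();
	new = kvzalloc(size, GFP_KERNEL_ACCOUNT);	/* may sleep */
	rcu_read_lock();
	sem_lock_and_putref(sma);		/* relock, drop the pin */
	if (!ipc_valid_object(&sma->sem_perm))	/* lost a race with RMID: */
		return ERR_PTR(-EIDRM);		/* unlock, free, bail out */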
*/ @@ -1619,7 +1930,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) } nsems = sma->sem_nsems; - if (!ipc_rcu_getref(sma)) { + if (!ipc_rcu_getref(&sma->sem_perm)) { rcu_read_unlock(); un = ERR_PTR(-EIDRM); goto out; @@ -1627,19 +1938,19 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) rcu_read_unlock(); /* step 2: allocate new undo structure */ - new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + new = kvzalloc(struct_size(new, semadj, nsems), GFP_KERNEL_ACCOUNT); if (!new) { - sem_putref(sma); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return ERR_PTR(-ENOMEM); } /* step 3: Acquire the lock on semaphore array */ rcu_read_lock(); sem_lock_and_putref(sma); - if (sma->sem_perm.deleted) { + if (!ipc_valid_object(&sma->sem_perm)) { sem_unlock(sma, -1); rcu_read_unlock(); - kfree(new); + kvfree(new); un = ERR_PTR(-EIDRM); goto out; } @@ -1650,11 +1961,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) */ un = lookup_undo(ulp, semid); if (un) { - kfree(new); + spin_unlock(&ulp->lock); + kvfree(new); goto success; } /* step 5: initialize & link new undo structure */ - new->semadj = (short *) &new[1]; new->ulp = ulp; new->semid = semid; assert_spin_locked(&ulp->lock); @@ -1662,100 +1973,71 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) ipc_assert_locked_object(&sma->sem_perm); list_add(&new->list_id, &sma->list_id); un = new; - -success: spin_unlock(&ulp->lock); +success: sem_unlock(sma, -1); out: return un; } - -/** - * get_queue_result - Retrieve the result code from sem_queue - * @q: Pointer to queue structure - * - * Retrieve the return code from the pending queue. If IN_WAKEUP is found in - * q->status, then we must loop until the value is replaced with the final - * value: This may happen if a task is woken up by an unrelated event (e.g. - * signal) and in parallel the task is woken up by another task because it got - * the requested semaphores. - * - * The function can be called with or without holding the semaphore spinlock. 
- */ -static int get_queue_result(struct sem_queue *q) -{ - int error; - - error = q->status; - while (unlikely(error == IN_WAKEUP)) { - cpu_relax(); - error = q->status; - } - - return error; -} - -SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, - unsigned, nsops, const struct timespec __user *, timeout) +long __do_semtimedop(int semid, struct sembuf *sops, + unsigned nsops, const struct timespec64 *timeout, + struct ipc_namespace *ns) { int error = -EINVAL; struct sem_array *sma; - struct sembuf fast_sops[SEMOPM_FAST]; - struct sembuf* sops = fast_sops, *sop; + struct sembuf *sop; struct sem_undo *un; - int undos = 0, alter = 0, max, locknum; + int max, locknum; + bool undos = false, alter = false, dupsop = false; struct sem_queue queue; - unsigned long jiffies_left = 0; - struct ipc_namespace *ns; - struct list_head tasks; - - ns = current->nsproxy->ipc_ns; + unsigned long dup = 0; + ktime_t expires, *exp = NULL; + bool timed_out = false; if (nsops < 1 || semid < 0) return -EINVAL; if (nsops > ns->sc_semopm) return -E2BIG; - if(nsops > SEMOPM_FAST) { - sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); - if(sops==NULL) - return -ENOMEM; - } - if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { - error=-EFAULT; - goto out_free; - } + if (timeout) { - struct timespec _timeout; - if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) { - error = -EFAULT; - goto out_free; - } - if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 || - _timeout.tv_nsec >= 1000000000L) { - error = -EINVAL; - goto out_free; - } - jiffies_left = timespec_to_jiffies(&_timeout); + if (!timespec64_valid(timeout)) + return -EINVAL; + expires = ktime_add_safe(ktime_get(), + timespec64_to_ktime(*timeout)); + exp = &expires; } + + max = 0; for (sop = sops; sop < sops + nsops; sop++) { + unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG); + if (sop->sem_num >= max) max = sop->sem_num; if (sop->sem_flg & SEM_UNDO) - undos = 1; - if (sop->sem_op != 0) - alter = 1; + undos = true; + if (dup & mask) { + /* + * There was a previous alter access that appears + * to have accessed the same semaphore, thus use + * the dupsop logic. "appears", because the detection + * can only check % BITS_PER_LONG. + */ + dupsop = true; + } + if (sop->sem_op != 0) { + alter = true; + dup |= mask; + } } - INIT_LIST_HEAD(&tasks); - if (undos) { /* On success, find_alloc_undo takes the rcu_read_lock */ un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); - goto out_free; + goto out; } } else { un = NULL; @@ -1766,21 +2048,39 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, if (IS_ERR(sma)) { rcu_read_unlock(); error = PTR_ERR(sma); - goto out_free; + goto out; } error = -EFBIG; - if (max >= sma->sem_nsems) - goto out_rcu_wakeup; + if (max >= sma->sem_nsems) { + rcu_read_unlock(); + goto out; + } error = -EACCES; - if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) - goto out_rcu_wakeup; + if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) { + rcu_read_unlock(); + goto out; + } - error = security_sem_semop(sma, sops, nsops, alter); - if (error) - goto out_rcu_wakeup; + error = security_sem_semop(&sma->sem_perm, sops, nsops, alter); + if (error) { + rcu_read_unlock(); + goto out; + } + error = -EIDRM; + locknum = sem_lock(sma, sops, nsops); + /* + * We eventually might perform the following check in a lockless + * fashion, considering ipc_valid_object() locking constraints. 
+ * If nsops == 1 and there is no contention for sem_perm.lock, then + * only a per-semaphore lock is held and it's OK to proceed with the + * check below. More details on the fine grained locking scheme + * entangled here and why it's RMID race safe on comments at sem_lock() + */ + if (!ipc_valid_object(&sma->sem_perm)) + goto out_unlock; /* * semid identifiers are not unique - find_alloc_undo may have * allocated an undo structure, it was invalidated by an RMID @@ -1788,33 +2088,46 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, * This case can be detected checking un->semid. The existence of * "un" itself is guaranteed by rcu. */ - error = -EIDRM; - locknum = sem_lock(sma, sops, nsops); if (un && un->semid == -1) - goto out_unlock_free; - - error = perform_atomic_semop(sma, sops, nsops, un, - task_tgid_vnr(current)); - if (error <= 0) { - if (alter && error == 0) - do_smart_update(sma, sops, nsops, 1, &tasks); - - goto out_unlock_free; - } + goto out_unlock; - /* We need to sleep on this operation, so we put the current - * task into the pending queue and go to sleep. - */ - queue.sops = sops; queue.nsops = nsops; queue.undo = un; - queue.pid = task_tgid_vnr(current); + queue.pid = task_tgid(current); queue.alter = alter; + queue.dupsop = dupsop; + + error = perform_atomic_semop(sma, &queue); + if (error == 0) { /* non-blocking successful path */ + DEFINE_WAKE_Q(wake_q); + + /* + * If the operation was successful, then do + * the required updates. + */ + if (alter) + do_smart_update(sma, sops, nsops, 1, &wake_q); + else + set_semotime(sma, sops); + + sem_unlock(sma, locknum); + rcu_read_unlock(); + wake_up_q(&wake_q); + + goto out; + } + if (error < 0) /* non-blocking error path */ + goto out_unlock; + /* + * We need to sleep on this operation, so we put the current + * task into the pending queue and go to sleep. + */ if (nsops == 1) { struct sem *curr; - curr = &sma->sem_base[sops->sem_num]; + int idx = array_index_nospec(sops->sem_num, sma->sem_nsems); + curr = &sma->sems[idx]; if (alter) { if (sma->complex_count) { @@ -1840,96 +2153,157 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, sma->complex_count++; } - queue.status = -EINTR; - queue.sleeper = current; + do { + /* memory ordering ensured by the lock in sem_lock() */ + WRITE_ONCE(queue.status, -EINTR); + queue.sleeper = current; -sleep_again: - current->state = TASK_INTERRUPTIBLE; - sem_unlock(sma, locknum); - rcu_read_unlock(); + /* memory ordering is ensured by the lock in sem_lock() */ + __set_current_state(TASK_INTERRUPTIBLE); + sem_unlock(sma, locknum); + rcu_read_unlock(); - if (timeout) - jiffies_left = schedule_timeout(jiffies_left); - else - schedule(); + timed_out = !schedule_hrtimeout_range(exp, + current->timer_slack_ns, HRTIMER_MODE_ABS); - error = get_queue_result(&queue); + /* + * fastpath: the semop has completed, either successfully or + * not, from the syscall pov, is quite irrelevant to us at this + * point; we're done. + * + * We _do_ care, nonetheless, about being awoken by a signal or + * spuriously. The queue.status is checked again in the + * slowpath (aka after taking sem_lock), such that we can detect + * scenarios where we were awakened externally, during the + * window between wake_q_add() and wake_up_q(). 
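On the userspace side, the timeout case handled in this loop surfaces as semtimedop() failing with EAGAIN. A hedged sketch of a caller (semtimedop() is Linux-specific and exposed by glibc under _GNU_SOURCE; error handling abbreviated):

#define _GNU_SOURCE
#include <errno.h>
#include <time.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

static int try_lock_1s(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (semtimedop(semid, &op, 1, &ts) == 0)
		return 0;
	return errno == EAGAIN ? -1 : -2;	/* -1: timed out */
}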
+ */ + rcu_read_lock(); + error = READ_ONCE(queue.status); + if (error != -EINTR) { + /* see SEM_BARRIER_2 for purpose/pairing */ + smp_acquire__after_ctrl_dep(); + rcu_read_unlock(); + goto out; + } + + locknum = sem_lock(sma, sops, nsops); - if (error != -EINTR) { - /* fast path: update_queue already obtained all requested - * resources. - * Perform a smp_mb(): User space could assume that semop() - * is a memory barrier: Without the mb(), the cpu could - * speculatively read in user space stale data that was - * overwritten by the previous owner of the semaphore. + if (!ipc_valid_object(&sma->sem_perm)) + goto out_unlock; + + /* + * No necessity for any barrier: We are protect by sem_lock() */ - smp_mb(); + error = READ_ONCE(queue.status); - goto out_free; - } + /* + * If queue.status != -EINTR we are woken up by another process. + * Leave without unlink_queue(), but with sem_unlock(). + */ + if (error != -EINTR) + goto out_unlock; - rcu_read_lock(); - sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); + /* + * If an interrupt occurred we have to clean up the queue. + */ + if (timed_out) + error = -EAGAIN; + } while (error == -EINTR && !signal_pending(current)); /* spurious */ - /* - * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing. - */ - error = get_queue_result(&queue); + unlink_queue(sma, &queue); - /* - * Array removed? If yes, leave without sem_unlock(). - */ - if (IS_ERR(sma)) { - rcu_read_unlock(); +out_unlock: + sem_unlock(sma, locknum); + rcu_read_unlock(); +out: + return error; +} + +static long do_semtimedop(int semid, struct sembuf __user *tsops, + unsigned nsops, const struct timespec64 *timeout) +{ + struct sembuf fast_sops[SEMOPM_FAST]; + struct sembuf *sops = fast_sops; + struct ipc_namespace *ns; + int ret; + + ns = current->nsproxy->ipc_ns; + if (nsops > ns->sc_semopm) + return -E2BIG; + if (nsops < 1) + return -EINVAL; + + if (nsops > SEMOPM_FAST) { + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); + if (sops == NULL) + return -ENOMEM; + } + + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { + ret = -EFAULT; goto out_free; } + ret = __do_semtimedop(semid, sops, nsops, timeout, ns); - /* - * If queue.status != -EINTR we are woken up by another process. - * Leave without unlink_queue(), but with sem_unlock(). 
- */ +out_free: + if (sops != fast_sops) + kvfree(sops); - if (error != -EINTR) { - goto out_unlock_free; - } + return ret; +} - /* - * If an interrupt occurred we have to clean up the queue - */ - if (timeout && jiffies_left == 0) - error = -EAGAIN; +long ksys_semtimedop(int semid, struct sembuf __user *tsops, + unsigned int nsops, const struct __kernel_timespec __user *timeout) +{ + if (timeout) { + struct timespec64 ts; + if (get_timespec64(&ts, timeout)) + return -EFAULT; + return do_semtimedop(semid, tsops, nsops, &ts); + } + return do_semtimedop(semid, tsops, nsops, NULL); +} - /* - * If the wakeup was spurious, just retry - */ - if (error == -EINTR && !signal_pending(current)) - goto sleep_again; +SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, + unsigned int, nsops, const struct __kernel_timespec __user *, timeout) +{ + return ksys_semtimedop(semid, tsops, nsops, timeout); +} - unlink_queue(sma, &queue); +#ifdef CONFIG_COMPAT_32BIT_TIME +long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned int nsops, + const struct old_timespec32 __user *timeout) +{ + if (timeout) { + struct timespec64 ts; + if (get_old_timespec32(&ts, timeout)) + return -EFAULT; + return do_semtimedop(semid, tsems, nsops, &ts); + } + return do_semtimedop(semid, tsems, nsops, NULL); +} -out_unlock_free: - sem_unlock(sma, locknum); -out_rcu_wakeup: - rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); -out_free: - if(sops != fast_sops) - kfree(sops); - return error; +SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems, + unsigned int, nsops, + const struct old_timespec32 __user *, timeout) +{ + return compat_ksys_semtimedop(semid, tsems, nsops, timeout); } +#endif SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, unsigned, nsops) { - return sys_semtimedop(semid, tsops, nsops, NULL); + return do_semtimedop(semid, tsops, nsops, NULL); } /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between * parent and child tasks. */ -int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) +int copy_semundo(u64 clone_flags, struct task_struct *tsk) { struct sem_undo_list *undo_list; int error; @@ -1938,9 +2312,9 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) error = get_undo_list(&undo_list); if (error) return error; - atomic_inc(&undo_list->refcnt); + refcount_inc(&undo_list->refcnt); tsk->sysvsem.undo_list = undo_list; - } else + } else tsk->sysvsem.undo_list = NULL; return 0; @@ -1967,29 +2341,43 @@ void exit_sem(struct task_struct *tsk) return; tsk->sysvsem.undo_list = NULL; - if (!atomic_dec_and_test(&ulp->refcnt)) + if (!refcount_dec_and_test(&ulp->refcnt)) return; for (;;) { struct sem_array *sma; struct sem_undo *un; - struct list_head tasks; int semid, i; + DEFINE_WAKE_Q(wake_q); + + cond_resched(); rcu_read_lock(); un = list_entry_rcu(ulp->list_proc.next, struct sem_undo, list_proc); - if (&un->list_proc == &ulp->list_proc) - semid = -1; - else - semid = un->semid; + if (&un->list_proc == &ulp->list_proc) { + /* + * We must wait for freeary() before freeing this ulp, + * in case we raced with last sem_undo. There is a small + * possibility where we exit while freeary() didn't + * finish unlocking sem_undo_list. 
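The undo replay performed further down in this loop is directly observable from userspace: a SEM_UNDO adjustment made by a child is reverted when the child exits. A small self-contained sketch (assumes Linux's zero-initialization of new sets; error handling omitted):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, 0600);
	struct sembuf inc = { 0, +1, 0 };
	struct sembuf dec = { 0, -1, SEM_UNDO };

	semop(id, &inc, 1);		/* semval: 0 -> 1 */
	if (fork() == 0) {
		semop(id, &dec, 1);	/* semval: 1 -> 0, child semadj: +1 */
		_exit(0);		/* exit_sem() re-applies the +1 */
	}
	wait(NULL);
	/* here semctl(id, 0, GETVAL) reports 1 again */
	return 0;
}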
+ */ + spin_lock(&ulp->lock); + spin_unlock(&ulp->lock); + rcu_read_unlock(); + break; + } + spin_lock(&ulp->lock); + semid = un->semid; + spin_unlock(&ulp->lock); + /* exit_sem raced with IPC_RMID, nothing to do */ if (semid == -1) { rcu_read_unlock(); - break; + continue; } - sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); + sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); /* exit_sem raced with IPC_RMID, nothing to do */ if (IS_ERR(sma)) { rcu_read_unlock(); @@ -1997,6 +2385,12 @@ void exit_sem(struct task_struct *tsk) } sem_lock(sma, NULL, -1); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + continue; + } un = __lookup_undo(ulp, semid); if (un == NULL) { /* exit_sem raced with IPC_RMID+semget() that created @@ -2017,7 +2411,7 @@ void exit_sem(struct task_struct *tsk) /* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { - struct sem * semaphore = &sma->sem_base[i]; + struct sem *semaphore = &sma->sems[i]; if (un->semadj[i]) { semaphore->semval += un->semadj[i]; /* @@ -2031,23 +2425,22 @@ void exit_sem(struct task_struct *tsk) * Linux caps the semaphore value, both at 0 * and at SEMVMX. * - * Manfred <manfred@colorfullife.com> + * Manfred <manfred@colorfullife.com> */ if (semaphore->semval < 0) semaphore->semval = 0; if (semaphore->semval > SEMVMX) semaphore->semval = SEMVMX; - semaphore->sempid = task_tgid_vnr(current); + ipc_update_pid(&semaphore->sempid, task_tgid(current)); } } /* maybe some queued-up processes were waiting for this */ - INIT_LIST_HEAD(&tasks); - do_smart_update(sma, NULL, 0, 1, &tasks); + do_smart_update(sma, NULL, 0, 1, &wake_q); sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); - kfree_rcu(un, rcu); + kvfree_rcu(un, rcu); } kfree(ulp); } @@ -2056,22 +2449,36 @@ void exit_sem(struct task_struct *tsk) static int sysvipc_sem_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); - struct sem_array *sma = it; - time_t sem_otime; + struct kern_ipc_perm *ipcp = it; + struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); + time64_t sem_otime; + + /* + * The proc interface isn't aware of sem_lock(), it calls + * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock). + * (in sysvipc_find_ipc) + * In order to stay compatible with sem_lock(), we must + * enter / leave complex_mode. 
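The seq_printf() below renders one row of /proc/sysvipc/sem, with the columns key, semid, perms, nsems, uid, gid, cuid, cgid, otime and ctime. A trivial reader of that file (sketch; it just dumps the table, header line included):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* first line is the header */
		fputs(line, stdout);
	fclose(f);
	return 0;
}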
+ */ + complexmode_enter(sma); sem_otime = get_semotime(sma); - return seq_printf(s, - "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", - sma->sem_perm.key, - sma->sem_perm.id, - sma->sem_perm.mode, - sma->sem_nsems, - from_kuid_munged(user_ns, sma->sem_perm.uid), - from_kgid_munged(user_ns, sma->sem_perm.gid), - from_kuid_munged(user_ns, sma->sem_perm.cuid), - from_kgid_munged(user_ns, sma->sem_perm.cgid), - sem_otime, - sma->sem_ctime); + seq_printf(s, + "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n", + sma->sem_perm.key, + sma->sem_perm.id, + sma->sem_perm.mode, + sma->sem_nsems, + from_kuid_munged(user_ns, sma->sem_perm.uid), + from_kgid_munged(user_ns, sma->sem_perm.gid), + from_kuid_munged(user_ns, sma->sem_perm.cuid), + from_kgid_munged(user_ns, sma->sem_perm.cgid), + sem_otime, + sma->sem_ctime); + + complexmode_tryleave(sma); + + return 0; } #endif diff --git a/ipc/shm.c b/ipc/shm.c index c6b4ad5ce3b7..3db36773dd10 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian @@ -19,12 +20,16 @@ * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> + * + * Better ipc lock (kern_ipc_perm.lock) handling + * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> +#include <uapi/linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> @@ -39,11 +44,44 @@ #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> +#include <linux/nstree.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" +struct shmid_kernel /* private to the kernel */ +{ + struct kern_ipc_perm shm_perm; + struct file *shm_file; + unsigned long shm_nattch; + unsigned long shm_segsz; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct ucounts *mlock_ucounts; + + /* + * The task created the shm object, for + * task_lock(shp->shm_creator) + */ + struct task_struct *shm_creator; + + /* + * List by creator. task_lock(->shm_creator) required for read/write. + * If list_empty(), then the creator is dead already. + */ + struct list_head shm_clist; + struct ipc_namespace *ns; +} __randomize_layout; + +/* shm_mode upper byte flags */ +#define SHM_DEST 01000 /* segment will be destroyed on last detach */ +#define SHM_LOCKED 02000 /* segment will not be swapped */ + struct shm_file_data { int id; struct ipc_namespace *ns; @@ -64,7 +102,7 @@ static const struct vm_operations_struct shm_vm_ops; static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); -static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); +static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif @@ -80,18 +118,20 @@ void shm_init_ns(struct ipc_namespace *ns) } /* - * Called with shm_ids.rw_mutex (writer) and the shp structure locked. - * Only shm_ids.rw_mutex remains locked on exit. + * Called with shm_ids.rwsem (writer) and the shp structure locked. + * Only shm_ids.rwsem remains locked on exit. 
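do_shm_rmid() below implements the familiar userspace idiom of removing a segment while it is still attached: IPC_RMID only marks it SHM_DEST and hides the key, and destruction happens at the last detach. A sketch (error handling omitted):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, 0600);
	char *p = shmat(id, NULL, 0);

	shmctl(id, IPC_RMID, NULL);	/* marks SHM_DEST, hides the key */
	p[0] = 1;			/* mapping remains fully usable */
	shmdt(p);			/* last detach -> shm_destroy() */
	return 0;
}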
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; + shp = container_of(ipcp, struct shmid_kernel, shm_perm); + WARN_ON(ns != shp->ns); - if (shp->shm_nattch){ + if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ - shp->shm_perm.key = IPC_PRIVATE; + ipc_set_key_private(&shm_ids(ns), &shp->shm_perm); shm_unlock(shp); } else shm_destroy(ns, shp); @@ -102,18 +142,20 @@ void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); + rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); + ns_tree_add(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); -void __init shm_init (void) +void __init shm_init(void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 @@ -124,18 +166,60 @@ void __init shm_init (void) IPC_SHM_IDS, sysvipc_shm_proc_show); } +static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct shmid_kernel, shm_perm); +} + +static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct shmid_kernel, shm_perm); +} + /* - * shm_lock_(check_) routines are called in the paths where the rw_mutex + * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); + struct kern_ipc_perm *ipcp; + rcu_read_lock(); + ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; + goto err; - return container_of(ipcp, struct shmid_kernel, shm_perm); + ipc_lock_object(ipcp); + /* + * ipc_rmid() may have already freed the ID while ipc_lock_object() + * was spinning: here verify that the structure is still valid. + * Upon races with RMID, return -EIDRM, thus indicating that + * the ID points to a removed identifier. + */ + if (ipc_valid_object(ipcp)) { + /* return a locked ipc object upon success */ + return container_of(ipcp, struct shmid_kernel, shm_perm); + } + + ipc_unlock_object(ipcp); + ipcp = ERR_PTR(-EIDRM); +err: + rcu_read_unlock(); + /* + * Callers of shm_lock() must validate the status of the returned ipc + * object pointer and error out as appropriate. + */ + return ERR_CAST(ipcp); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) @@ -144,36 +228,95 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) ipc_lock_object(&ipcp->shm_perm); } -static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, - int id) +static void shm_rcu_free(struct rcu_head *head) { - struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id); + struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, + rcu); + struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, + shm_perm); + security_shm_free(&shp->shm_perm); + kfree(shp); +} - if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; +/* + * It has to be called with shp locked. 
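
The -EIDRM path in shm_lock() above is observable from userspace: an ipc id encodes a sequence number, so a stale id from before an IPC_RMID is rejected even if the idr slot is later reused. A rough sketch (the exact errno is EINVAL or EIDRM depending on whether the slot was reused):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int old = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	shmctl(old, IPC_RMID, NULL);

	int fresh = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	printf("old id %d, fresh id %d\n", old, fresh);  /* seq bump keeps them distinct */

	if (shmat(old, NULL, 0) == (void *)-1)
		printf("stale id rejected: %s\n", strerror(errno));

	shmctl(fresh, IPC_RMID, NULL);
	return 0;
}
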
+ * It must be called before ipc_rmid() + */ +static inline void shm_clist_rm(struct shmid_kernel *shp) +{ + struct task_struct *creator; - return container_of(ipcp, struct shmid_kernel, shm_perm); + /* ensure that shm_creator does not disappear */ + rcu_read_lock(); + + /* + * A concurrent exit_shm may do a list_del_init() as well. + * Just do nothing if exit_shm already did the work + */ + if (!list_empty(&shp->shm_clist)) { + /* + * shp->shm_creator is guaranteed to be valid *only* + * if shp->shm_clist is not empty. + */ + creator = shp->shm_creator; + + task_lock(creator); + /* + * list_del_init() is a nop if the entry was already removed + * from the list. + */ + list_del_init(&shp->shm_clist); + task_unlock(creator); + } + rcu_read_unlock(); } -static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) +static inline void shm_rmid(struct shmid_kernel *s) { - ipc_rmid(&shm_ids(ns), &s->shm_perm); + shm_clist_rm(s); + ipc_rmid(&shm_ids(s->ns), &s->shm_perm); } -/* This is called by fork, once for every shm attach. */ -static void shm_open(struct vm_area_struct *vma) +static int __shm_open(struct shm_file_data *sfd) { - struct file *file = vma->vm_file; - struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); - BUG_ON(IS_ERR(shp)); - shp->shm_atim = get_seconds(); - shp->shm_lprid = task_tgid_vnr(current); + + if (IS_ERR(shp)) + return PTR_ERR(shp); + + if (shp->shm_file != sfd->file) { + /* ID was reused */ + shm_unlock(shp); + return -EINVAL; + } + + shp->shm_atim = ktime_get_real_seconds(); + ipc_update_pid(&shp->shm_lprid, task_tgid(current)); shp->shm_nattch++; shm_unlock(shp); + return 0; +} + +/* This is called by fork, once for every shm attach. */ +static void shm_open(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + int err; + + /* Always call underlying open if present */ + if (sfd->vm_ops->open) + sfd->vm_ops->open(vma); + + err = __shm_open(sfd); + /* + * We raced in the idr lookup or with shm_destroy(). + * Either way, the ID is busted. + */ + WARN_ON_ONCE(err); } /* @@ -182,22 +325,24 @@ static void shm_open(struct vm_area_struct *vma) * @ns: namespace * @shp: struct to free * - * It has to be called with shp and shm_ids.rw_mutex (writer) locked, + * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { + struct file *shm_file; + + shm_file = shp->shm_file; + shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; - shm_rmid(ns, shp); + shm_rmid(shp); shm_unlock(shp); - if (!is_file_hugepages(shp->shm_file)) - shmem_lock(shp->shm_file, 0, shp->mlock_user); - else if (shp->mlock_user) - user_shm_unlock(file_inode(shp->shm_file)->i_size, - shp->mlock_user); - fput (shp->shm_file); - security_shm_free(shp); - ipc_rcu_putref(shp); + if (!is_file_hugepages(shm_file)) + shmem_lock(shm_file, 0, shp->mlock_ucounts); + fput(shm_file); + ipc_update_pid(&shp->shm_cprid, NULL); + ipc_update_pid(&shp->shm_lprid, NULL); + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); } /* @@ -210,10 +355,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) * * 2) sysctl kernel.shm_rmid_forced is set to 1. 
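
__shm_open() above is also what makes fork() count as an attach, since shm_open() runs once per inherited mapping. shm_nattch is easy to watch from userspace (illustrative sketch, error handling trimmed):

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	struct shmid_ds ds;
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p = shmat(id, NULL, 0);

	if (fork() == 0) {
		/* the inherited mapping was counted by shm_open() at fork */
		shmctl(id, IPC_STAT, &ds);
		printf("child: nattch = %lu\n", (unsigned long)ds.shm_nattch);  /* 2 */
		shmdt(p);   /* shm_close() drops it back to 1 */
		_exit(0);
	}
	wait(NULL);
	shmctl(id, IPC_STAT, &ds);
	printf("parent: nattch = %lu\n", (unsigned long)ds.shm_nattch);  /* 1 */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
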
*/ -static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) +static bool shm_may_destroy(struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && - (ns->shm_rmid_forced || + (shp->ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } @@ -223,59 +368,46 @@ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ -static void shm_close(struct vm_area_struct *vma) +static void __shm_close(struct shm_file_data *sfd) { - struct file * file = vma->vm_file; - struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; - down_write(&shm_ids(ns).rw_mutex); + down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); - BUG_ON(IS_ERR(shp)); - shp->shm_lprid = task_tgid_vnr(current); - shp->shm_dtim = get_seconds(); + + /* + * We raced in the idr lookup or with shm_destroy(). + * Either way, the ID is busted. + */ + if (WARN_ON_ONCE(IS_ERR(shp))) + goto done; /* no-op */ + + ipc_update_pid(&shp->shm_lprid, task_tgid(current)); + shp->shm_dtim = ktime_get_real_seconds(); shp->shm_nattch--; - if (shm_may_destroy(ns, shp)) + if (shm_may_destroy(shp)) shm_destroy(ns, shp); else shm_unlock(shp); - up_write(&shm_ids(ns).rw_mutex); +done: + up_write(&shm_ids(ns).rwsem); } -/* Called with ns->shm_ids(ns).rw_mutex locked */ -static int shm_try_destroy_current(int id, void *p, void *data) +static void shm_close(struct vm_area_struct *vma) { - struct ipc_namespace *ns = data; - struct kern_ipc_perm *ipcp = p; - struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); - - if (shp->shm_creator != current) - return 0; - - /* - * Mark it as orphaned to destroy the segment when - * kernel.shm_rmid_forced is changed. - * It is noop if the following shm_may_destroy() returns true. - */ - shp->shm_creator = NULL; + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); - /* - * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID - * is not set, it shouldn't be deleted here. - */ - if (!ns->shm_rmid_forced) - return 0; + /* Always call underlying close if present */ + if (sfd->vm_ops->close) + sfd->vm_ops->close(vma); - if (shm_may_destroy(ns, shp)) { - shm_lock_by_ptr(shp); - shm_destroy(ns, shp); - } - return 0; + __shm_close(sfd); } -/* Called with ns->shm_ids(ns).rw_mutex locked */ +/* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; @@ -286,12 +418,12 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data) * We want to destroy segments without users and with already * exit'ed originating process. * - * As shp->* are changed under rw_mutex, it's safe to skip shp locking. + * As shp->* are changed under rwsem, it's safe to skip shp locking. 
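
shm_try_destroy_orphaned() only fires when kernel.shm_rmid_forced is set; with the default of 0, a segment whose creator has exited merely lingers on the orphan list. A sketch of the observable difference (the key value is arbitrary, and flipping the sysctl requires privilege, so both outcomes are shown):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	key_t key = 0x517e57;  /* arbitrary key, for illustration only */

	if (fork() == 0) {
		/* creator exits without ever attaching: nattch stays 0 */
		shmget(key, 4096, IPC_CREAT | 0600);
		_exit(0);
	}
	wait(NULL);

	/*
	 * With kernel.shm_rmid_forced=1 the creator's exit_shm() already
	 * destroyed the orphan and this lookup fails (ENOENT); with the
	 * default of 0 the segment is still there.
	 */
	int id = shmget(key, 4096, 0600);
	printf("after creator exit: %s\n",
	       id < 0 ? strerror(errno) : "segment still present");
	if (id >= 0)
		shmctl(id, IPC_RMID, NULL);
	return 0;
}
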
 */
-	if (shp->shm_creator != NULL)
+	if (!list_empty(&shp->shm_clist))
 		return 0;
 
-	if (shm_may_destroy(ns, shp)) {
+	if (shm_may_destroy(shp)) {
 		shm_lock_by_ptr(shp);
 		shm_destroy(ns, shp);
 	}
@@ -300,78 +432,189 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
-	down_write(&shm_ids(ns).rw_mutex);
-	if (shm_ids(ns).in_use)
+	down_write(&shm_ids(ns).rwsem);
+	if (shm_ids(ns).in_use) {
+		rcu_read_lock();
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+		rcu_read_unlock();
+	}
+	up_write(&shm_ids(ns).rwsem);
 }
 
-
+/* Locking assumes this will only be called with task == current */
 void exit_shm(struct task_struct *task)
 {
-	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
+	for (;;) {
+		struct shmid_kernel *shp;
+		struct ipc_namespace *ns;
+
+		task_lock(task);
+
+		if (list_empty(&task->sysvshm.shm_clist)) {
+			task_unlock(task);
+			break;
+		}
+
+		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
+				shm_clist);
+
+		/*
+		 * 1) Get a pointer to the ipc namespace. It is worth noting
+		 * that this pointer is guaranteed to be valid because
+		 * shp's lifetime is always shorter than the lifetime of the
+		 * namespace in which shp lives.
+		 * We have taken task_lock(), so shp won't be freed.
+		 */
+		ns = shp->ns;
+
+		/*
+		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
+		 * which shmids are orphaned, so that a later set of the sysctl
+		 * can clean them up.
+		 */
+		if (!ns->shm_rmid_forced)
+			goto unlink_continue;
+
+		/*
+		 * 3) get a reference to the namespace.
+		 * The refcount could already be 0. If it is 0, then
+		 * the shm objects will be freed by free_ipc_work().
+		 */
+		ns = get_ipc_ns_not_zero(ns);
+		if (!ns) {
+unlink_continue:
+			list_del_init(&shp->shm_clist);
+			task_unlock(task);
+			continue;
+		}
+
+		/*
+		 * 4) get a reference to shp.
+		 * This cannot fail: shm_clist_rm() is called before
+		 * ipc_rmid(), thus the refcount cannot be 0.
+		 */
+		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
+
+		/*
+		 * 5) unlink the shm segment from the list of segments
+		 * created by current.
+		 * This must be done last. After unlinking,
+		 * only the refcounts obtained above prevent IPC_RMID
+		 * from destroying the segment or the namespace.
+		 */
+		list_del_init(&shp->shm_clist);
+
+		task_unlock(task);
+
+		/*
+		 * 6) We have all references.
+		 * Thus lock and, if needed, destroy shp.
+		 */
+		down_write(&shm_ids(ns).rwsem);
+		shm_lock_by_ptr(shp);
+		/*
+		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
+		 * safe to call ipc_rcu_putref here
+		 */
+		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
+
+		if (ipc_valid_object(&shp->shm_perm)) {
+			if (shm_may_destroy(shp))
+				shm_destroy(ns, shp);
+			else
+				shm_unlock(shp);
+		} else {
+			/*
+			 * Someone else deleted the shp from the namespace
+			 * idr/kht while we were waiting.
			 * Just unlock and continue.
+ */ + shm_unlock(shp); + } + + up_write(&shm_ids(ns).rwsem); + put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */ + } +} - if (shm_ids(ns).in_use == 0) - return; +static vm_fault_t shm_fault(struct vm_fault *vmf) +{ + struct file *file = vmf->vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); - /* Destroy all already created segments, but not mapped yet */ - down_write(&shm_ids(ns).rw_mutex); - if (shm_ids(ns).in_use) - idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); - up_write(&shm_ids(ns).rw_mutex); + return sfd->vm_ops->fault(vmf); } -static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +static int shm_may_split(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); - return sfd->vm_ops->fault(vma, vmf); + if (sfd->vm_ops->may_split) + return sfd->vm_ops->may_split(vma, addr); + + return 0; } -#ifdef CONFIG_NUMA -static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) +static unsigned long shm_pagesize(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); + + if (sfd->vm_ops->pagesize) + return sfd->vm_ops->pagesize(vma); + + return PAGE_SIZE; +} + +#ifdef CONFIG_NUMA +static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) +{ + struct shm_file_data *sfd = shm_file_data(vma->vm_file); int err = 0; + if (sfd->vm_ops->set_policy) - err = sfd->vm_ops->set_policy(vma, new); + err = sfd->vm_ops->set_policy(vma, mpol); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, - unsigned long addr) + unsigned long addr, pgoff_t *ilx) { - struct file *file = vma->vm_file; - struct shm_file_data *sfd = shm_file_data(file); - struct mempolicy *pol = NULL; + struct shm_file_data *sfd = shm_file_data(vma->vm_file); + struct mempolicy *mpol = vma->vm_policy; if (sfd->vm_ops->get_policy) - pol = sfd->vm_ops->get_policy(vma, addr); - else if (vma->vm_policy) - pol = vma->vm_policy; - - return pol; + mpol = sfd->vm_ops->get_policy(vma, addr, ilx); + return mpol; } #endif -static int shm_mmap(struct file * file, struct vm_area_struct * vma) +static int shm_mmap(struct file *file, struct vm_area_struct *vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; - ret = sfd->file->f_op->mmap(sfd->file, vma); - if (ret != 0) + /* + * In case of remap_file_pages() emulation, the file can represent an + * IPC ID that was removed, and possibly even reused by another shm + * segment already. Propagate this case as an error to caller. 
+ */ + ret = __shm_open(sfd); + if (ret) + return ret; + + ret = vfs_mmap(sfd->file, vma); + if (ret) { + __shm_close(sfd); return ret; + } sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU - BUG_ON(!sfd->vm_ops->fault); + WARN_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; - shm_open(vma); - - return ret; + return 0; } static int shm_release(struct inode *ino, struct file *file) @@ -379,6 +622,7 @@ static int shm_release(struct inode *ino, struct file *file) struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); + fput(sfd->file); shm_file_data(file) = NULL; kfree(sfd); return 0; @@ -408,6 +652,7 @@ static unsigned long shm_get_unmapped_area(struct file *file, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); + return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } @@ -416,13 +661,15 @@ static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, -#ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, -#endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; +/* + * shm_file_operations_huge is now identical to shm_file_operations + * except for fop_flags + */ static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, @@ -430,17 +677,15 @@ static const struct file_operations shm_file_operations_huge = { .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, + .fop_flags = FOP_HUGE_PAGES, }; -int is_file_shm_hugepages(struct file *file) -{ - return file->f_op == &shm_file_operations_huge; -} - static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = shm_fault, + .may_split = shm_may_split, + .pagesize = shm_pagesize, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, @@ -452,9 +697,8 @@ static const struct vm_operations_struct shm_vm_ops = { * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * - * Called with shm_ids.rw_mutex held as a writer. + * Called with shm_ids.rwsem held as a writer. 
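
The two new guards in newseg() are pure integer hygiene: the page round-up of a huge size can wrap, and so can the running ns->shm_tot. A standalone sketch of the same arithmetic (PAGE_SHIFT fixed at 12 here purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* mirrors the check in newseg(): rejects sizes whose round-up wrapped */
static int pages_for(size_t size, size_t *numpages)
{
	*numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (*numpages << PAGE_SHIFT < size)
		return -1;   /* the kernel returns -ENOSPC here */
	return 0;
}

int main(void)
{
	size_t n;

	if (pages_for(4097, &n) == 0)
		printf("4097 bytes -> %zu pages\n", n);   /* 2 */
	if (pages_for(SIZE_MAX - 100, &n) != 0)
		printf("SIZE_MAX-100 bytes -> round-up wrapped, rejected\n");
	return 0;
}
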
*/ - static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; @@ -463,33 +707,36 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct file * file; + struct file *file; char name[13]; - int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; - if (ns->shm_tot + numpages > ns->shm_ctlall) + if (numpages << PAGE_SHIFT < size) + return -ENOSPC; + + if (ns->shm_tot + numpages < ns->shm_tot || + ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - shp = ipc_rcu_alloc(sizeof(*shp)); - if (!shp) + shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT); + if (unlikely(!shp)) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); - shp->mlock_user = NULL; + shp->mlock_ucounts = NULL; shp->shm_perm.security = NULL; - error = security_shm_alloc(shp); + error = security_shm_alloc(&shp->shm_perm); if (error) { - ipc_rcu_putref(shp); + kfree(shp); return error; } - sprintf (name, "SYSV%08x", key); + sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; @@ -505,37 +752,41 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, - &shp->mlock_user, HUGETLB_SHMFS_INODE, - (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); + HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even - * if it's asked for. + * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; - file = shmem_file_setup(name, size, acctflag); + file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; - goto no_id; - } - - shp->shm_cprid = task_tgid_vnr(current); - shp->shm_lprid = 0; + shp->shm_cprid = get_pid(task_tgid(current)); + shp->shm_lprid = NULL; shp->shm_atim = shp->shm_dtim = 0; - shp->shm_ctim = get_seconds(); + shp->shm_ctim = ktime_get_real_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; + /* ipc_addid() locks shp upon success. */ + error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (error < 0) + goto no_id; + + shp->ns = ns; + + task_lock(current); + list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); + task_unlock(current); + /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. @@ -550,31 +801,20 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) return error; no_id: - if (is_file_hugepages(file) && shp->mlock_user) - user_shm_unlock(size, shp->mlock_user); + ipc_update_pid(&shp->shm_cprid, NULL); + ipc_update_pid(&shp->shm_lprid, NULL); fput(file); + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); + return error; no_file: - security_shm_free(shp); - ipc_rcu_putref(shp); + call_rcu(&shp->shm_perm.rcu, shm_rcu_free); return error; } /* - * Called with shm_ids.rw_mutex and ipcp locked. 
- */ -static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) -{ - struct shmid_kernel *shp; - - shp = container_of(ipcp, struct shmid_kernel, shm_perm); - return security_shm_associate(shp, shmflg); -} - -/* - * Called with shm_ids.rw_mutex and ipcp locked. + * Called with shm_ids.rwsem and ipcp locked. */ -static inline int shm_more_checks(struct kern_ipc_perm *ipcp, - struct ipc_params *params) +static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; @@ -585,18 +825,18 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, return 0; } -SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) +long ksys_shmget(key_t key, size_t size, int shmflg) { struct ipc_namespace *ns; - struct ipc_ops shm_ops; + static const struct ipc_ops shm_ops = { + .getnew = newseg, + .associate = security_shm_associate, + .more_checks = shm_more_checks, + }; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; - shm_ops.getnew = newseg; - shm_ops.associate = shm_security; - shm_ops.more_checks = shm_more_checks; - shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; @@ -604,9 +844,14 @@ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } +SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) +{ + return ksys_shmget(key, size, shmflg); +} + static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -633,7 +878,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; @@ -658,14 +903,14 @@ copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; - if(in->shmmax > INT_MAX) + if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; @@ -673,7 +918,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; - out.shmall = in->shmall; + out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } @@ -684,7 +929,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf /* * Calculate and add used RSS and swap pages of a shm. 
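
shmctl_ipc_info()/shmctl_shm_info() and the copy helpers above back two Linux-specific queries that take no shmid at all. A sketch of the userspace side (glibc assumed, hence _GNU_SOURCE for the struct definitions; note the positive return value is the highest in-use index, not 0):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	struct shminfo limits;    /* IPC_INFO: static limits */
	struct shm_info usage;    /* SHM_INFO: current usage */
	int maxidx;

	if (shmctl(0, IPC_INFO, (struct shmid_ds *)&limits) >= 0)
		printf("shmmax=%lu shmall=%lu shmmni=%lu\n",
		       (unsigned long)limits.shmmax,
		       (unsigned long)limits.shmall,
		       (unsigned long)limits.shmmni);

	maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&usage);
	if (maxidx >= 0)
		printf("used_ids=%d shm_tot=%lu shm_rss=%lu (highest index %d)\n",
		       usage.used_ids, usage.shm_tot, usage.shm_rss, maxidx);
	return 0;
}
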
- * Called with shm_ids.rw_mutex held as a reader + * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) @@ -700,10 +945,11 @@ static void shm_add_rss_swap(struct shmid_kernel *shp, } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); - spin_lock(&info->lock); + + spin_lock_irq(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif @@ -711,7 +957,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp, } /* - * Called with shm_ids.rw_mutex held as a reader + * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) @@ -740,53 +986,49 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, } /* - * This function handles some shmctl commands which require the rw_mutex + * This function handles some shmctl commands which require the rwsem * to be held in write mode. - * NOTE: no locks must be held, the rw_mutex is taken inside this function. + * NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, - struct shmid_ds __user *buf, int version) + struct shmid64_ds *shmid64) { struct kern_ipc_perm *ipcp; - struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; - if (cmd == IPC_SET) { - if (copy_shmid_from_user(&shmid64, buf, version)) - return -EFAULT; - } - - down_write(&shm_ids(ns).rw_mutex); + down_write(&shm_ids(ns).rwsem); rcu_read_lock(); - ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd, - &shmid64.shm_perm, 0); + ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd, + &shmid64->shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); - /* the ipc lock is not held upon failure */ goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); - err = security_shm_shmctl(shp, cmd); + err = security_shm_shmctl(&shp->shm_perm, cmd); if (err) - goto out_unlock0; + goto out_unlock1; switch (cmd) { case IPC_RMID: + ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: - err = ipc_update_perm(&shmid64.shm_perm, ipcp); + ipc_lock_object(&shp->shm_perm); + err = ipc_update_perm(&shmid64->shm_perm, ipcp); if (err) goto out_unlock0; - shp->shm_ctim = get_seconds(); + shp->shm_ctim = ktime_get_real_seconds(); break; default: err = -EINVAL; + goto out_unlock1; } out_unlock0: @@ -794,185 +1036,479 @@ out_unlock0: out_unlock1: rcu_read_unlock(); out_up: - up_write(&shm_ids(ns).rw_mutex); + up_write(&shm_ids(ns).rwsem); return err; } -SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) +static int shmctl_ipc_info(struct ipc_namespace *ns, + struct shminfo64 *shminfo) +{ + int err = security_shm_shmctl(NULL, IPC_INFO); + if (!err) { + memset(shminfo, 0, sizeof(*shminfo)); + shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni; + shminfo->shmmax = ns->shm_ctlmax; + shminfo->shmall = ns->shm_ctlall; + shminfo->shmmin = SHMMIN; + down_read(&shm_ids(ns).rwsem); + err = ipc_get_maxidx(&shm_ids(ns)); + up_read(&shm_ids(ns).rwsem); + if (err < 0) + err = 0; + } + return err; +} + +static int shmctl_shm_info(struct ipc_namespace *ns, + struct shm_info *shm_info) +{ + int err = security_shm_shmctl(NULL, SHM_INFO); + if (!err) { + memset(shm_info, 0, 
sizeof(*shm_info)); + down_read(&shm_ids(ns).rwsem); + shm_info->used_ids = shm_ids(ns).in_use; + shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp); + shm_info->shm_tot = ns->shm_tot; + shm_info->swap_attempts = 0; + shm_info->swap_successes = 0; + err = ipc_get_maxidx(&shm_ids(ns)); + up_read(&shm_ids(ns).rwsem); + if (err < 0) + err = 0; + } + return err; +} + +static int shmctl_stat(struct ipc_namespace *ns, int shmid, + int cmd, struct shmid64_ds *tbuf) { struct shmid_kernel *shp; - int err, version; - struct ipc_namespace *ns; + int err; - if (cmd < 0 || shmid < 0) { - err = -EINVAL; - goto out; + memset(tbuf, 0, sizeof(*tbuf)); + + rcu_read_lock(); + if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) { + shp = shm_obtain_object(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); + goto out_unlock; + } + } else { /* IPC_STAT */ + shp = shm_obtain_object_check(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); + goto out_unlock; + } } - version = ipc_parse_version(&cmd); - ns = current->nsproxy->ipc_ns; + /* + * Semantically SHM_STAT_ANY ought to be identical to + * that functionality provided by the /proc/sysvipc/ + * interface. As such, only audit these calls and + * do not do traditional S_IRUGO permission checks on + * the ipc object. + */ + if (cmd == SHM_STAT_ANY) + audit_ipc_obj(&shp->shm_perm); + else { + err = -EACCES; + if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) + goto out_unlock; + } - switch (cmd) { /* replace with proc interface ? */ - case IPC_INFO: - { - struct shminfo64 shminfo; + err = security_shm_shmctl(&shp->shm_perm, cmd); + if (err) + goto out_unlock; - err = security_shm_shmctl(NULL, cmd); - if (err) - return err; + ipc_lock_object(&shp->shm_perm); - memset(&shminfo, 0, sizeof(shminfo)); - shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; - shminfo.shmmax = ns->shm_ctlmax; - shminfo.shmall = ns->shm_ctlall; + if (!ipc_valid_object(&shp->shm_perm)) { + ipc_unlock_object(&shp->shm_perm); + err = -EIDRM; + goto out_unlock; + } - shminfo.shmmin = SHMMIN; - if(copy_shminfo_to_user (buf, &shminfo, version)) - return -EFAULT; + kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm); + tbuf->shm_segsz = shp->shm_segsz; + tbuf->shm_atime = shp->shm_atim; + tbuf->shm_dtime = shp->shm_dtim; + tbuf->shm_ctime = shp->shm_ctim; +#ifndef CONFIG_64BIT + tbuf->shm_atime_high = shp->shm_atim >> 32; + tbuf->shm_dtime_high = shp->shm_dtim >> 32; + tbuf->shm_ctime_high = shp->shm_ctim >> 32; +#endif + tbuf->shm_cpid = pid_vnr(shp->shm_cprid); + tbuf->shm_lpid = pid_vnr(shp->shm_lprid); + tbuf->shm_nattch = shp->shm_nattch; - down_read(&shm_ids(ns).rw_mutex); - err = ipc_get_maxid(&shm_ids(ns)); - up_read(&shm_ids(ns).rw_mutex); + if (cmd == IPC_STAT) { + /* + * As defined in SUS: + * Return 0 on success + */ + err = 0; + } else { + /* + * SHM_STAT and SHM_STAT_ANY (both Linux specific) + * Return the full id, including the sequence number + */ + err = shp->shm_perm.id; + } - if(err<0) - err = 0; - goto out; + ipc_unlock_object(&shp->shm_perm); +out_unlock: + rcu_read_unlock(); + return err; +} + +static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd) +{ + struct shmid_kernel *shp; + struct file *shm_file; + int err; + + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); + goto out_unlock1; } - case SHM_INFO: - { - struct shm_info shm_info; - err = security_shm_shmctl(NULL, cmd); - if (err) - return err; + audit_ipc_obj(&(shp->shm_perm)); + err = security_shm_shmctl(&shp->shm_perm, cmd); + if (err) + goto 
out_unlock1; - memset(&shm_info, 0, sizeof(shm_info)); - down_read(&shm_ids(ns).rw_mutex); - shm_info.used_ids = shm_ids(ns).in_use; - shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); - shm_info.shm_tot = ns->shm_tot; - shm_info.swap_attempts = 0; - shm_info.swap_successes = 0; - err = ipc_get_maxid(&shm_ids(ns)); - up_read(&shm_ids(ns).rw_mutex); - if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { - err = -EFAULT; - goto out; + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + err = -EIDRM; + goto out_unlock0; + } + + if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { + kuid_t euid = current_euid(); + + if (!uid_eq(euid, shp->shm_perm.uid) && + !uid_eq(euid, shp->shm_perm.cuid)) { + err = -EPERM; + goto out_unlock0; } + if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { + err = -EPERM; + goto out_unlock0; + } + } - err = err < 0 ? 0 : err; - goto out; + shm_file = shp->shm_file; + if (is_file_hugepages(shm_file)) + goto out_unlock0; + + if (cmd == SHM_LOCK) { + struct ucounts *ucounts = current_ucounts(); + + err = shmem_lock(shm_file, 1, ucounts); + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { + shp->shm_perm.mode |= SHM_LOCKED; + shp->mlock_ucounts = ucounts; + } + goto out_unlock0; + } + + /* SHM_UNLOCK */ + if (!(shp->shm_perm.mode & SHM_LOCKED)) + goto out_unlock0; + shmem_lock(shm_file, 0, shp->mlock_ucounts); + shp->shm_perm.mode &= ~SHM_LOCKED; + shp->mlock_ucounts = NULL; + get_file(shm_file); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); + shmem_unlock_mapping(shm_file->f_mapping); + + fput(shm_file); + return err; + +out_unlock0: + ipc_unlock_object(&shp->shm_perm); +out_unlock1: + rcu_read_unlock(); + return err; +} + +static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version) +{ + int err; + struct ipc_namespace *ns; + struct shmid64_ds sem64; + + if (cmd < 0 || shmid < 0) + return -EINVAL; + + ns = current->nsproxy->ipc_ns; + + switch (cmd) { + case IPC_INFO: { + struct shminfo64 shminfo; + err = shmctl_ipc_info(ns, &shminfo); + if (err < 0) + return err; + if (copy_shminfo_to_user(buf, &shminfo, version)) + err = -EFAULT; + return err; + } + case SHM_INFO: { + struct shm_info shm_info; + err = shmctl_shm_info(ns, &shm_info); + if (err < 0) + return err; + if (copy_to_user(buf, &shm_info, sizeof(shm_info))) + err = -EFAULT; + return err; } case SHM_STAT: - case IPC_STAT: - { - struct shmid64_ds tbuf; - int result; - - if (cmd == SHM_STAT) { - shp = shm_lock(ns, shmid); - if (IS_ERR(shp)) { - err = PTR_ERR(shp); - goto out; - } - result = shp->shm_perm.id; - } else { - shp = shm_lock_check(ns, shmid); - if (IS_ERR(shp)) { - err = PTR_ERR(shp); - goto out; - } - result = 0; - } - err = -EACCES; - if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) - goto out_unlock; - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock; - memset(&tbuf, 0, sizeof(tbuf)); - kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); - tbuf.shm_segsz = shp->shm_segsz; - tbuf.shm_atime = shp->shm_atim; - tbuf.shm_dtime = shp->shm_dtim; - tbuf.shm_ctime = shp->shm_ctim; - tbuf.shm_cpid = shp->shm_cprid; - tbuf.shm_lpid = shp->shm_lprid; - tbuf.shm_nattch = shp->shm_nattch; - shm_unlock(shp); - if(copy_shmid_to_user (buf, &tbuf, version)) + case SHM_STAT_ANY: + case IPC_STAT: { + err = shmctl_stat(ns, shmid, cmd, &sem64); + if (err < 0) + return err; + if (copy_shmid_to_user(buf, &sem64, version)) err = -EFAULT; - else - err = result; - goto out; + return err; } + case 
IPC_SET: + if (copy_shmid_from_user(&sem64, buf, version)) + return -EFAULT; + fallthrough; + case IPC_RMID: + return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: case SHM_UNLOCK: - { - struct file *shm_file; + return shmctl_do_lock(ns, shmid, cmd); + default: + return -EINVAL; + } +} - shp = shm_lock_check(ns, shmid); - if (IS_ERR(shp)) { - err = PTR_ERR(shp); - goto out; - } +SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) +{ + return ksys_shmctl(shmid, cmd, buf, IPC_64); +} - audit_ipc_obj(&(shp->shm_perm)); +#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) +{ + int version = ipc_parse_version(&cmd); - if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { - kuid_t euid = current_euid(); - err = -EPERM; - if (!uid_eq(euid, shp->shm_perm.uid) && - !uid_eq(euid, shp->shm_perm.cuid)) - goto out_unlock; - if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) - goto out_unlock; - } + return ksys_shmctl(shmid, cmd, buf, version); +} - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock; +SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) +{ + return ksys_old_shmctl(shmid, cmd, buf); +} +#endif - shm_file = shp->shm_file; - if (is_file_hugepages(shm_file)) - goto out_unlock; +#ifdef CONFIG_COMPAT + +struct compat_shmid_ds { + struct compat_ipc_perm shm_perm; + int shm_segsz; + old_time32_t shm_atime; + old_time32_t shm_dtime; + old_time32_t shm_ctime; + compat_ipc_pid_t shm_cpid; + compat_ipc_pid_t shm_lpid; + unsigned short shm_nattch; + unsigned short shm_unused; + compat_uptr_t shm_unused2; + compat_uptr_t shm_unused3; +}; - if (cmd == SHM_LOCK) { - struct user_struct *user = current_user(); - err = shmem_lock(shm_file, 1, user); - if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { - shp->shm_perm.mode |= SHM_LOCKED; - shp->mlock_user = user; - } - goto out_unlock; - } +struct compat_shminfo64 { + compat_ulong_t shmmax; + compat_ulong_t shmmin; + compat_ulong_t shmmni; + compat_ulong_t shmseg; + compat_ulong_t shmall; + compat_ulong_t __unused1; + compat_ulong_t __unused2; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; - /* SHM_UNLOCK */ - if (!(shp->shm_perm.mode & SHM_LOCKED)) - goto out_unlock; - shmem_lock(shm_file, 0, shp->mlock_user); - shp->shm_perm.mode &= ~SHM_LOCKED; - shp->mlock_user = NULL; - get_file(shm_file); - shm_unlock(shp); - shmem_unlock_mapping(shm_file->f_mapping); - fput(shm_file); - goto out; +struct compat_shm_info { + compat_int_t used_ids; + compat_ulong_t shm_tot, shm_rss, shm_swp; + compat_ulong_t swap_attempts, swap_successes; +}; + +static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in, + int version) +{ + if (in->shmmax > INT_MAX) + in->shmmax = INT_MAX; + if (version == IPC_64) { + struct compat_shminfo64 info; + memset(&info, 0, sizeof(info)); + info.shmmax = in->shmmax; + info.shmmin = in->shmmin; + info.shmmni = in->shmmni; + info.shmseg = in->shmseg; + info.shmall = in->shmall; + return copy_to_user(buf, &info, sizeof(info)); + } else { + struct shminfo info; + memset(&info, 0, sizeof(info)); + info.shmmax = in->shmmax; + info.shmmin = in->shmmin; + info.shmmni = in->shmmni; + info.shmseg = in->shmseg; + info.shmall = in->shmall; + return copy_to_user(buf, &info, sizeof(info)); } - case IPC_RMID: - case IPC_SET: - err = shmctl_down(ns, shmid, cmd, buf, version); +} + +static int put_compat_shm_info(struct shm_info *ip, + struct compat_shm_info __user *uip) +{ + struct compat_shm_info info; + 
+ memset(&info, 0, sizeof(info)); + info.used_ids = ip->used_ids; + info.shm_tot = ip->shm_tot; + info.shm_rss = ip->shm_rss; + info.shm_swp = ip->shm_swp; + info.swap_attempts = ip->swap_attempts; + info.swap_successes = ip->swap_successes; + return copy_to_user(uip, &info, sizeof(info)); +} + +static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, + int version) +{ + if (version == IPC_64) { + struct compat_shmid64_ds v; + memset(&v, 0, sizeof(v)); + to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm); + v.shm_atime = lower_32_bits(in->shm_atime); + v.shm_atime_high = upper_32_bits(in->shm_atime); + v.shm_dtime = lower_32_bits(in->shm_dtime); + v.shm_dtime_high = upper_32_bits(in->shm_dtime); + v.shm_ctime = lower_32_bits(in->shm_ctime); + v.shm_ctime_high = upper_32_bits(in->shm_ctime); + v.shm_segsz = in->shm_segsz; + v.shm_nattch = in->shm_nattch; + v.shm_cpid = in->shm_cpid; + v.shm_lpid = in->shm_lpid; + return copy_to_user(buf, &v, sizeof(v)); + } else { + struct compat_shmid_ds v; + memset(&v, 0, sizeof(v)); + to_compat_ipc_perm(&v.shm_perm, &in->shm_perm); + v.shm_perm.key = in->shm_perm.key; + v.shm_atime = in->shm_atime; + v.shm_dtime = in->shm_dtime; + v.shm_ctime = in->shm_ctime; + v.shm_segsz = in->shm_segsz; + v.shm_nattch = in->shm_nattch; + v.shm_cpid = in->shm_cpid; + v.shm_lpid = in->shm_lpid; + return copy_to_user(buf, &v, sizeof(v)); + } +} + +static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf, + int version) +{ + memset(out, 0, sizeof(*out)); + if (version == IPC_64) { + struct compat_shmid64_ds __user *p = buf; + return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm); + } else { + struct compat_shmid_ds __user *p = buf; + return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm); + } +} + +static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version) +{ + struct ipc_namespace *ns; + struct shmid64_ds sem64; + int err; + + ns = current->nsproxy->ipc_ns; + + if (cmd < 0 || shmid < 0) + return -EINVAL; + + switch (cmd) { + case IPC_INFO: { + struct shminfo64 shminfo; + err = shmctl_ipc_info(ns, &shminfo); + if (err < 0) + return err; + if (copy_compat_shminfo_to_user(uptr, &shminfo, version)) + err = -EFAULT; + return err; + } + case SHM_INFO: { + struct shm_info shm_info; + err = shmctl_shm_info(ns, &shm_info); + if (err < 0) + return err; + if (put_compat_shm_info(&shm_info, uptr)) + err = -EFAULT; + return err; + } + case IPC_STAT: + case SHM_STAT_ANY: + case SHM_STAT: + err = shmctl_stat(ns, shmid, cmd, &sem64); + if (err < 0) + return err; + if (copy_compat_shmid_to_user(uptr, &sem64, version)) + err = -EFAULT; return err; + + case IPC_SET: + if (copy_compat_shmid_from_user(&sem64, uptr, version)) + return -EFAULT; + fallthrough; + case IPC_RMID: + return shmctl_down(ns, shmid, cmd, &sem64); + case SHM_LOCK: + case SHM_UNLOCK: + return shmctl_do_lock(ns, shmid, cmd); default: return -EINVAL; } - -out_unlock: - shm_unlock(shp); -out: return err; } +COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr) +{ + return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64); +} + +#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION +long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr) +{ + int version = compat_ipc_parse_version(&cmd); + + return compat_ksys_shmctl(shmid, cmd, uptr, version); +} + +COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr) +{ + return compat_ksys_old_shmctl(shmid, cmd, uptr); +} +#endif +#endif + /* * Fix shmaddr, allocate 
descriptor, map shm, add attach descriptor to lists. * @@ -980,52 +1516,57 @@ out: * "raddr" thing points to kernel space, and there has to be a wrapper around * this. */ -long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, - unsigned long shmlba) +long do_shmat(int shmid, char __user *shmaddr, int shmflg, + ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; - unsigned long addr; + unsigned long addr = (unsigned long)shmaddr; unsigned long size; - struct file * file; + struct file *file, *base; int err; - unsigned long flags; + unsigned long flags = MAP_SHARED; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; - struct path path; - fmode_t f_mode; + int f_flags; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; - else if ((addr = (ulong)shmaddr)) { + + if (addr) { if (addr & (shmlba - 1)) { - if (shmflg & SHM_RND) - addr &= ~(shmlba - 1); /* round down */ - else + if (shmflg & SHM_RND) { + addr &= ~(shmlba - 1); /* round down */ + + /* + * Ensure that the round-down is non-nil + * when remapping. This can happen for + * cases when addr < shmlba. + */ + if (!addr && (shmflg & SHM_REMAP)) + goto out; + } else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } - flags = MAP_SHARED | MAP_FIXED; - } else { - if ((shmflg & SHM_REMAP)) - goto out; - flags = MAP_SHARED; - } + flags |= MAP_FIXED; + } else if ((shmflg & SHM_REMAP)) + goto out; if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; - f_mode = FMODE_READ; + f_flags = O_RDONLY; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; - f_mode = FMODE_READ | FMODE_WRITE; + f_flags = O_RDWR; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; @@ -1037,71 +1578,94 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, * additional creator id... */ ns = current->nsproxy->ipc_ns; - shp = shm_lock_check(ns, shmid); + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); - goto out; + goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; - err = security_shm_shmat(shp, shmaddr, shmflg); + err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg); if (err) goto out_unlock; - path = shp->shm_file->f_path; - path_get(&path); + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + ipc_unlock_object(&shp->shm_perm); + err = -EIDRM; + goto out_unlock; + } + + /* + * We need to take a reference to the real shm file to prevent the + * pointer from becoming stale in cases where the lifetime of the outer + * file extends beyond that of the shm segment. It's not usually + * possible, but it can happen during remap_file_pages() emulation as + * that unmaps the memory, then does ->mmap() via file reference only. + * We'll deny the ->mmap() if the shm segment was since removed, but to + * detect shm ID reuse we need to compare the file pointers. + */ + base = get_file(shp->shm_file); shp->shm_nattch++; - size = i_size_read(path.dentry->d_inode); - shm_unlock(shp); + size = i_size_read(file_inode(base)); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); - if (!sfd) - goto out_put_dentry; + if (!sfd) { + fput(base); + goto out_nattch; + } - file = alloc_file(&path, f_mode, - is_file_hugepages(shp->shm_file) ? + file = alloc_file_clone(base, f_flags, + is_file_hugepages(base) ? 
&shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); - if (IS_ERR(file)) - goto out_free; + if (IS_ERR(file)) { + kfree(sfd); + fput(base); + goto out_nattch; + } - file->private_data = sfd; - file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); - sfd->file = shp->shm_file; + sfd->file = base; sfd->vm_ops = NULL; + file->private_data = sfd; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; - down_write(¤t->mm->mmap_sem); + if (mmap_write_lock_killable(current->mm)) { + err = -EINTR; + goto out_fput; + } + if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; - if (find_vma_intersection(current->mm, addr, addr + size)) + if (addr + size < addr) goto invalid; - /* - * If shm segment goes below stack, make sure there is some - * space left for the stack to grow (at least 4 pages). - */ - if (addr < current->mm->start_stack && - addr > current->mm->start_stack - size - PAGE_SIZE * 5) + + if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; } - - addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); + + addr = do_mmap(file, addr, size, prot, flags, 0, 0, &populate, NULL); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: - up_write(¤t->mm->mmap_sem); + mmap_write_unlock(current->mm); if (populate) mm_populate(addr, populate); @@ -1109,28 +1673,21 @@ out_fput: fput(file); out_nattch: - down_write(&shm_ids(ns).rw_mutex); + down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); - BUG_ON(IS_ERR(shp)); shp->shm_nattch--; - if (shm_may_destroy(ns, shp)) + + if (shm_may_destroy(shp)) shm_destroy(ns, shp); else shm_unlock(shp); - up_write(&shm_ids(ns).rw_mutex); - -out: + up_write(&shm_ids(ns).rwsem); return err; out_unlock: - shm_unlock(shp); - goto out; - -out_free: - kfree(sfd); -out_put_dentry: - path_put(&path); - goto out_nattch; + rcu_read_unlock(); +out: + return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) @@ -1145,11 +1702,30 @@ SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) return (long)ret; } +#ifdef CONFIG_COMPAT + +#ifndef COMPAT_SHMLBA +#define COMPAT_SHMLBA SHMLBA +#endif + +COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg) +{ + unsigned long ret; + long err; + + err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA); + if (err) + return err; + force_successful_syscall_return(); + return (long)ret; +} +#endif + /* * detach and kill segment if marked destroyed. * The work is done in shm_close. */ -SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) +long ksys_shmdt(char __user *shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -1157,13 +1733,15 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; - struct vm_area_struct *next; + struct file *file; + VMA_ITERATOR(vmi, mm, addr); #endif if (addr & ~PAGE_MASK) return retval; - down_write(&mm->mmap_sem); + if (mmap_write_lock_killable(mm)) + return -EINTR; /* * This function tries to be smart and unmap shm segments that @@ -1173,7 +1751,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that - * are within the initially determined size. + * are within the initially determined size and that are from the + * same shm segment from which we determined the size. 
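
ksys_shmdt() below rejects anything that is not page aligned before it even scans the VMA list, and only an address actually returned by shmat() will match a VMA's vm_pgoff. A tiny sketch of both the failure and the success case:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 8192, IPC_CREAT | 0600);
	void *p = shmat(id, NULL, 0);   /* kernel picks an SHMLBA-aligned address */

	if (shmdt((char *)p + 1) < 0)
		perror("shmdt at p+1");     /* EINVAL: not an attach address */
	if (shmdt(p) == 0)
		printf("detached at %p\n", p);

	shmctl(id, IPC_RMID, NULL);
	return 0;
}
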
* Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, @@ -1185,12 +1764,9 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) * match the usual checks anyway. So assume all vma's are * above the starting address given. */ - vma = find_vma(mm, addr); #ifdef CONFIG_MMU - while (vma) { - next = vma->vm_next; - + for_each_vma(vmi, vma) { /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it @@ -1199,9 +1775,16 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { - - size = file_inode(vma->vm_file)->i_size; - do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); + /* + * Record the file of the shm segment being + * unmapped. With mremap(), someone could place + * page from another segment but with equal offsets + * in the range we are unmapping. + */ + file = vma->vm_file; + size = i_size_read(file_inode(vma->vm_file)); + do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start, + vma->vm_end, NULL, false); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next @@ -1209,10 +1792,9 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) * searching for matching vma's. */ retval = 0; - vma = next; + vma = vma_next(&vmi); break; } - vma = next; } /* @@ -1222,38 +1804,48 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) */ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { - next = vma->vm_next; - /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && - (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) + ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && + (vma->vm_file == file)) { + do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start, + vma->vm_end, NULL, false); + } - do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); - vma = next; + vma = vma_next(&vmi); } -#else /* CONFIG_MMU */ +#else /* CONFIG_MMU */ + vma = vma_lookup(mm, addr); /* under NOMMU conditions, the exact address to be destroyed must be - * given */ - retval = -EINVAL; - if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { - do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); + * given + */ + if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { + do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); retval = 0; } #endif - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return retval; } +SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) +{ + return ksys_shmdt(shmaddr); +} + #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { + struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); struct user_namespace *user_ns = seq_user_ns(s); - struct shmid_kernel *shp = it; + struct kern_ipc_perm *ipcp = it; + struct shmid_kernel *shp; unsigned long rss = 0, swp = 0; + shp = container_of(ipcp, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 @@ -1262,25 +1854,27 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it) #define SIZE_SPEC "%21lu" #endif - return seq_printf(s, - "%10d %10d %4o " SIZE_SPEC " %5u %5u " - "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " - SIZE_SPEC " " SIZE_SPEC "\n", - shp->shm_perm.key, - shp->shm_perm.id, - shp->shm_perm.mode, - shp->shm_segsz, - shp->shm_cprid, - shp->shm_lprid, - shp->shm_nattch, - 
from_kuid_munged(user_ns, shp->shm_perm.uid), - from_kgid_munged(user_ns, shp->shm_perm.gid), - from_kuid_munged(user_ns, shp->shm_perm.cuid), - from_kgid_munged(user_ns, shp->shm_perm.cgid), - shp->shm_atim, - shp->shm_dtim, - shp->shm_ctim, - rss * PAGE_SIZE, - swp * PAGE_SIZE); + seq_printf(s, + "%10d %10d %4o " SIZE_SPEC " %5u %5u " + "%5lu %5u %5u %5u %5u %10llu %10llu %10llu " + SIZE_SPEC " " SIZE_SPEC "\n", + shp->shm_perm.key, + shp->shm_perm.id, + shp->shm_perm.mode, + shp->shm_segsz, + pid_nr_ns(shp->shm_cprid, pid_ns), + pid_nr_ns(shp->shm_lprid, pid_ns), + shp->shm_nattch, + from_kuid_munged(user_ns, shp->shm_perm.uid), + from_kgid_munged(user_ns, shp->shm_perm.gid), + from_kuid_munged(user_ns, shp->shm_perm.cuid), + from_kgid_munged(user_ns, shp->shm_perm.cgid), + shp->shm_atim, + shp->shm_dtim, + shp->shm_ctim, + rss * PAGE_SIZE, + swp * PAGE_SIZE); + + return 0; } #endif diff --git a/ipc/syscall.c b/ipc/syscall.c index 52429489cde0..dfb0e988d542 100644 --- a/ipc/syscall.c +++ b/ipc/syscall.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * sys_ipc() is the old de-multiplexer for the SysV IPC calls. * @@ -5,16 +6,19 @@ * the individual syscalls instead. */ #include <linux/unistd.h> +#include <linux/syscalls.h> +#include <linux/security.h> +#include <linux/ipc_namespace.h> +#include "util.h" #ifdef __ARCH_WANT_SYS_IPC #include <linux/errno.h> #include <linux/ipc.h> #include <linux/shm.h> -#include <linux/syscalls.h> #include <linux/uaccess.h> -SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, - unsigned long, third, void __user *, ptr, long, fifth) +int ksys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void __user * ptr, long fifth) { int version, ret; @@ -23,26 +27,31 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, switch (call) { case SEMOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, - second, NULL); + return ksys_semtimedop(first, (struct sembuf __user *)ptr, + second, NULL); case SEMTIMEDOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, - second, - (const struct timespec __user *)fifth); + if (IS_ENABLED(CONFIG_64BIT)) + return ksys_semtimedop(first, ptr, second, + (const struct __kernel_timespec __user *)fifth); + else if (IS_ENABLED(CONFIG_COMPAT_32BIT_TIME)) + return compat_ksys_semtimedop(first, ptr, second, + (const struct old_timespec32 __user *)fifth); + else + return -ENOSYS; case SEMGET: - return sys_semget(first, second, third); + return ksys_semget(first, second, third); case SEMCTL: { unsigned long arg; if (!ptr) return -EINVAL; if (get_user(arg, (unsigned long __user *) ptr)) return -EFAULT; - return sys_semctl(first, second, third, arg); + return ksys_old_semctl(first, second, third, arg); } case MSGSND: - return sys_msgsnd(first, (struct msgbuf __user *) ptr, + return ksys_msgsnd(first, (struct msgbuf __user *) ptr, second, third); case MSGRCV: switch (version) { @@ -55,18 +64,19 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, (struct ipc_kludge __user *) ptr, sizeof(tmp))) return -EFAULT; - return sys_msgrcv(first, tmp.msgp, second, + return ksys_msgrcv(first, tmp.msgp, second, tmp.msgtyp, third); } default: - return sys_msgrcv(first, + return ksys_msgrcv(first, (struct msgbuf __user *) ptr, second, fifth, third); } case MSGGET: - return sys_msgget((key_t) first, second); + return ksys_msgget((key_t) first, second); case MSGCTL: - return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); + 
return ksys_old_msgctl(first, second, + (struct msqid_ds __user *)ptr); case SHMAT: switch (version) { @@ -86,14 +96,116 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, return -EINVAL; } case SHMDT: - return sys_shmdt((char __user *)ptr); + return ksys_shmdt((char __user *)ptr); case SHMGET: - return sys_shmget(first, second, third); + return ksys_shmget(first, second, third); case SHMCTL: - return sys_shmctl(first, second, + return ksys_old_shmctl(first, second, (struct shmid_ds __user *) ptr); default: return -ENOSYS; } } + +SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, + unsigned long, third, void __user *, ptr, long, fifth) +{ + return ksys_ipc(call, first, second, third, ptr, fifth); +} +#endif + +#ifdef CONFIG_COMPAT +#include <linux/compat.h> + +#ifndef COMPAT_SHMLBA +#define COMPAT_SHMLBA SHMLBA +#endif + +struct compat_ipc_kludge { + compat_uptr_t msgp; + compat_long_t msgtyp; +}; + +#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC +int compat_ksys_ipc(u32 call, int first, int second, + u32 third, compat_uptr_t ptr, u32 fifth) +{ + int version; + u32 pad; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + /* struct sembuf is the same on 32 and 64bit :)) */ + return ksys_semtimedop(first, compat_ptr(ptr), second, NULL); + case SEMTIMEDOP: + if (!IS_ENABLED(CONFIG_COMPAT_32BIT_TIME)) + return -ENOSYS; + return compat_ksys_semtimedop(first, compat_ptr(ptr), second, + compat_ptr(fifth)); + case SEMGET: + return ksys_semget(first, second, third); + case SEMCTL: + if (!ptr) + return -EINVAL; + if (get_user(pad, (u32 __user *) compat_ptr(ptr))) + return -EFAULT; + return compat_ksys_old_semctl(first, second, third, pad); + + case MSGSND: + return compat_ksys_msgsnd(first, ptr, second, third); + + case MSGRCV: { + void __user *uptr = compat_ptr(ptr); + + if (first < 0 || second < 0) + return -EINVAL; + + if (!version) { + struct compat_ipc_kludge ipck; + if (!uptr) + return -EINVAL; + if (copy_from_user(&ipck, uptr, sizeof(ipck))) + return -EFAULT; + return compat_ksys_msgrcv(first, ipck.msgp, second, + ipck.msgtyp, third); + } + return compat_ksys_msgrcv(first, ptr, second, fifth, third); + } + case MSGGET: + return ksys_msgget(first, second); + case MSGCTL: + return compat_ksys_old_msgctl(first, second, compat_ptr(ptr)); + + case SHMAT: { + int err; + unsigned long raddr; + + if (version == 1) + return -EINVAL; + err = do_shmat(first, compat_ptr(ptr), second, &raddr, + COMPAT_SHMLBA); + if (err < 0) + return err; + return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third)); + } + case SHMDT: + return ksys_shmdt(compat_ptr(ptr)); + case SHMGET: + return ksys_shmget(first, (unsigned int)second, third); + case SHMCTL: + return compat_ksys_old_shmctl(first, second, compat_ptr(ptr)); + } + + return -ENOSYS; +} + +COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second, + u32, third, compat_uptr_t, ptr, u32, fifth) +{ + return compat_ksys_ipc(call, first, second, third, ptr, fifth); +} +#endif #endif diff --git a/ipc/util.c b/ipc/util.c index 4704223bfad4..cae60f11d9c2 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/util.c * Copyright (C) 1992 Krishna Balasubramanian @@ -15,6 +16,32 @@ * Jun 2006 - namespaces ssupport * OpenVZ, SWsoft Inc. 
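
ksys_ipc() above is only reachable on architectures that kept the old ipc() multiplexer (32-bit x86, for instance); elsewhere glibc issues the direct syscalls. A hedged sketch of calling through the multiplexer — the SHMGET constant mirrors the kernel's call numbering and is defined locally here to avoid pulling in <linux/ipc.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHMGET
#define SHMGET 23   /* call number from the kernel's ipc() multiplexer */
#endif

int main(void)
{
	long id;

#ifdef SYS_ipc
	/* ipc(call, first, second, third, ptr, fifth): SHMGET uses first..third */
	id = syscall(SYS_ipc, SHMGET, IPC_PRIVATE, 4096, IPC_CREAT | 0600, NULL, 0);
#else
	id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);  /* direct-syscall arch */
#endif
	if (id < 0) {
		perror("shmget");
		return 1;
	}
	printf("got shmid %ld\n", id);
	shmctl((int)id, IPC_RMID, NULL);
	return 0;
}
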
* Pavel Emelianov <xemul@openvz.org> + * + * General sysv ipc locking scheme: + * rcu_read_lock() + * obtain the ipc object (kern_ipc_perm) by looking up the id in an idr + * tree. + * - perform initial checks (capabilities, auditing and permission, + * etc). + * - perform read-only operations, such as INFO command, that + * do not demand atomicity + * acquire the ipc lock (kern_ipc_perm.lock) through + * ipc_lock_object() + * - perform read-only operations that demand atomicity, + * such as STAT command. + * - perform data updates, such as SET, RMID commands and + * mechanism-specific operations (semop/semtimedop, + * msgsnd/msgrcv, shmat/shmdt). + * drop the ipc lock, through ipc_unlock_object(). + * rcu_read_unlock() + * + * The ids->rwsem must be taken when: + * - creating, removing and iterating the existing entries in ipc + * identifier sets. + * - iterating through files under /proc/sysvipc/ + * + * Note that sems have a special fast path that avoids kern_ipc_perm.lock - + * see sem_lock(). */ #include <linux/mm.h> @@ -36,6 +63,8 @@ #include <linux/rwsem.h> #include <linux/memory.h> #include <linux/ipc_namespace.h> +#include <linux/rhashtable.h> +#include <linux/log2.h> #include <asm/unistd.h> @@ -48,101 +77,63 @@ struct ipc_proc_iface { int (*show)(struct seq_file *, void *); }; -static void ipc_memory_notifier(struct work_struct *work) -{ - ipcns_notify(IPCNS_MEMCHANGED); -} - -static int ipc_memory_callback(struct notifier_block *self, - unsigned long action, void *arg) -{ - static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier); - - switch (action) { - case MEM_ONLINE: /* memory successfully brought online */ - case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */ - /* - * This is done by invoking the ipcns notifier chain with the - * IPC_MEMCHANGED event. - * In order not to keep the lock on the hotplug memory chain - * for too long, queue a work item that will, when waken up, - * activate the ipcns notification chain. - * No need to keep several ipc work items on the queue. - */ - if (!work_pending(&ipc_memory_wq)) - schedule_work(&ipc_memory_wq); - break; - case MEM_GOING_ONLINE: - case MEM_GOING_OFFLINE: - case MEM_CANCEL_ONLINE: - case MEM_CANCEL_OFFLINE: - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block ipc_memory_nb = { - .notifier_call = ipc_memory_callback, - .priority = IPC_CALLBACK_PRI, -}; - /** - * ipc_init - initialise IPC subsystem + * ipc_init - initialise ipc subsystem + * + * The various sysv ipc resources (semaphores, messages and shared + * memory) are initialised. * - * The various system5 IPC resources (semaphores, messages and shared - * memory) are initialised - * A callback routine is registered into the memory hotplug notifier - * chain: since msgmni scales to lowmem this callback routine will be - * called upon successful memory add / remove to recompute msmgni. + * A callback routine is registered into the memory hotplug notifier + * chain: since msgmni scales to lowmem this callback routine will be + * called upon successful memory add / remove to recompute msmgni. 
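
[Example] The locking scheme reads more concretely as code. A minimal kernel-style sketch of the STAT-like flow described above, using the helpers defined later in this file (example_stat is a hypothetical function, error paths trimmed):

static int example_stat(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *perm;
	int err = 0;

	rcu_read_lock();
	perm = ipc_obtain_object_check(ids, id);	/* idr lookup, no lock */
	if (IS_ERR(perm)) {
		rcu_read_unlock();
		return PTR_ERR(perm);
	}
	/* lockless phase: permission checks, audit, ... */
	ipc_lock_object(perm);			/* spin_lock(&perm->lock) */
	if (!ipc_valid_object(perm)) {		/* lost a race with RMID */
		err = -EIDRM;
		goto out_unlock;
	}
	/* atomic phase: read fields consistently, or update them */
out_unlock:
	ipc_unlock_object(perm);
	rcu_read_unlock();
	return err;
}
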
*/ - static int __init ipc_init(void) { + proc_mkdir("sysvipc", NULL); sem_init(); msg_init(); shm_init(); - register_hotmemory_notifier(&ipc_memory_nb); - register_ipcns_notifier(&init_ipc_ns); + return 0; } -__initcall(ipc_init); +device_initcall(ipc_init); + +static const struct rhashtable_params ipc_kht_params = { + .head_offset = offsetof(struct kern_ipc_perm, khtnode), + .key_offset = offsetof(struct kern_ipc_perm, key), + .key_len = sizeof_field(struct kern_ipc_perm, key), + .automatic_shrinking = true, +}; /** - * ipc_init_ids - initialise IPC identifiers - * @ids: Identifier set + * ipc_init_ids - initialise ipc identifiers + * @ids: ipc identifier set * - * Set up the sequence range to use for the ipc identifier range (limited - * below IPCMNI) then initialise the ids idr. + * Set up the sequence range to use for the ipc identifier range (limited + * below ipc_mni) then initialise the keys hashtable and ids idr. */ - void ipc_init_ids(struct ipc_ids *ids) { - init_rwsem(&ids->rw_mutex); - ids->in_use = 0; ids->seq = 0; - ids->next_id = -1; - { - int seq_limit = INT_MAX/SEQ_MULTIPLIER; - if (seq_limit > USHRT_MAX) - ids->seq_max = USHRT_MAX; - else - ids->seq_max = seq_limit; - } - + init_rwsem(&ids->rwsem); + rhashtable_init(&ids->key_ht, &ipc_kht_params); idr_init(&ids->ipcs_idr); + ids->max_idx = -1; + ids->last_idx = -1; +#ifdef CONFIG_CHECKPOINT_RESTORE + ids->next_id = -1; +#endif } #ifdef CONFIG_PROC_FS -static const struct file_operations sysvipc_proc_fops; +static const struct proc_ops sysvipc_proc_ops; /** - * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface. - * @path: Path in procfs - * @header: Banner to be printed at the beginning of the file. - * @ids: ipc id table to iterate. - * @show: show routine. + * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface. + * @path: Path in procfs + * @header: Banner to be printed at the beginning of the file. + * @ids: ipc id table to iterate. + * @show: show routine. */ void __init ipc_init_proc_interface(const char *path, const char *header, int ids, int (*show)(struct seq_file *, void *)) @@ -161,181 +152,219 @@ void __init ipc_init_proc_interface(const char *path, const char *header, pde = proc_create_data(path, S_IRUGO, /* world readable */ NULL, /* parent dir */ - &sysvipc_proc_fops, + &sysvipc_proc_ops, iface); - if (!pde) { + if (!pde) kfree(iface); - } } #endif /** - * ipc_findkey - find a key in an ipc identifier set - * @ids: Identifier set - * @key: The key to find - * - * Requires ipc_ids.rw_mutex locked. - * Returns the LOCKED pointer to the ipc structure if found or NULL - * if not. - * If key is found ipc points to the owning ipc structure + * ipc_findkey - find a key in an ipc identifier set + * @ids: ipc identifier set + * @key: key to find + * + * Returns the locked pointer to the ipc structure if found or NULL + * otherwise. If key is found ipc points to the owning ipc structure + * + * Called with writer ipc_ids.rwsem held. 
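
[Example] ipc_findkey() below is what makes two gets with the same key converge on one object, now via an rhashtable lookup instead of a linear idr scan. A runnable userspace illustration (0x2404 is an arbitrary example key):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	int a = msgget(0x2404, IPC_CREAT | 0600);	/* creates, inserts key */
	int b = msgget(0x2404, 0600);			/* hits ipc_findkey() */

	printf("a=%d b=%d -> %s\n", a, b,
	       a == b ? "same object" : "different");
	if (a >= 0)
		msgctl(a, IPC_RMID, NULL);		/* clean up */
	return 0;
}
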
*/ - static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) { - struct kern_ipc_perm *ipc; - int next_id; - int total; - - for (total = 0, next_id = 0; total < ids->in_use; next_id++) { - ipc = idr_find(&ids->ipcs_idr, next_id); - - if (ipc == NULL) - continue; - - if (ipc->key != key) { - total++; - continue; - } + struct kern_ipc_perm *ipcp; - ipc_lock_by_ptr(ipc); - return ipc; - } + ipcp = rhashtable_lookup_fast(&ids->key_ht, &key, + ipc_kht_params); + if (!ipcp) + return NULL; - return NULL; + rcu_read_lock(); + ipc_lock_object(ipcp); + return ipcp; } -/** - * ipc_get_maxid - get the last assigned id - * @ids: IPC identifier set +/* + * Insert new IPC object into idr tree, and set sequence number and id + * in the correct order. + * Especially: + * - the sequence number must be set before inserting the object into the idr, + * because the sequence number is accessed without a lock. + * - the id can/must be set after inserting the object into the idr. + * All accesses must be done after getting kern_ipc_perm.lock. * - * Called with ipc_ids.rw_mutex held. + * The caller must own kern_ipc_perm.lock.of the new object. + * On error, the function returns a (negative) error code. + * + * To conserve sequence number space, especially with extended ipc_mni, + * the sequence number is incremented only when the returned ID is less than + * the last one. */ - -int ipc_get_maxid(struct ipc_ids *ids) +static inline int ipc_idr_alloc(struct ipc_ids *ids, struct kern_ipc_perm *new) { - struct kern_ipc_perm *ipc; - int max_id = -1; - int total, id; + int idx, next_id = -1; - if (ids->in_use == 0) - return -1; +#ifdef CONFIG_CHECKPOINT_RESTORE + next_id = ids->next_id; + ids->next_id = -1; +#endif - if (ids->in_use == IPCMNI) - return IPCMNI - 1; + /* + * As soon as a new object is inserted into the idr, + * ipc_obtain_object_idr() or ipc_obtain_object_check() can find it, + * and the lockless preparations for ipc operations can start. + * This means especially: permission checks, audit calls, allocation + * of undo structures, ... + * + * Thus the object must be fully initialized, and if something fails, + * then the full tear-down sequence must be followed. + * (i.e.: set new->deleted, reduce refcount, call_rcu()) + */ - /* Look for the last assigned id */ - total = 0; - for (id = 0; id < IPCMNI && total < ids->in_use; id++) { - ipc = idr_find(&ids->ipcs_idr, id); - if (ipc != NULL) { - max_id = id; - total++; + if (next_id < 0) { /* !CHECKPOINT_RESTORE or next_id is unset */ + int max_idx; + + max_idx = max(ids->in_use*3/2, ipc_min_cycle); + max_idx = min(max_idx, ipc_mni); + + /* allocate the idx, with a NULL struct kern_ipc_perm */ + idx = idr_alloc_cyclic(&ids->ipcs_idr, NULL, 0, max_idx, + GFP_NOWAIT); + + if (idx >= 0) { + /* + * idx got allocated successfully. + * Now calculate the sequence number and set the + * pointer for real. 
+ */ + if (idx <= ids->last_idx) { + ids->seq++; + if (ids->seq >= ipcid_seq_max()) + ids->seq = 0; + } + ids->last_idx = idx; + + new->seq = ids->seq; + /* no need for smp_wmb(), this is done + * inside idr_replace, as part of + * rcu_assign_pointer + */ + idr_replace(&ids->ipcs_idr, new, idx); } + } else { + new->seq = ipcid_to_seqx(next_id); + idx = idr_alloc(&ids->ipcs_idr, new, ipcid_to_idx(next_id), + 0, GFP_NOWAIT); } - return max_id; + if (idx >= 0) + new->id = (new->seq << ipcmni_seq_shift()) + idx; + return idx; } /** - * ipc_addid - add an IPC identifier - * @ids: IPC identifier set - * @new: new IPC permission set - * @size: limit for the number of used ids + * ipc_addid - add an ipc identifier + * @ids: ipc identifier set + * @new: new ipc permission set + * @limit: limit for the number of used ids * - * Add an entry 'new' to the IPC ids idr. The permissions object is - * initialised and the first free entry is set up and the id assigned - * is returned. The 'new' entry is returned in a locked state on success. - * On failure the entry is not locked and a negative err-code is returned. + * Add an entry 'new' to the ipc ids idr. The permissions object is + * initialised and the first free entry is set up and the index assigned + * is returned. The 'new' entry is returned in a locked state on success. * - * Called with writer ipc_ids.rw_mutex held. + * On failure the entry is not locked and a negative err-code is returned. + * The caller must use ipc_rcu_putref() to free the identifier. + * + * Called with writer ipc_ids.rwsem held. */ -int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) +int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit) { kuid_t euid; kgid_t egid; - int id; - int next_id = ids->next_id; + int idx, err; + + /* 1) Initialize the refcount so that ipc_rcu_putref works */ + refcount_set(&new->refcount, 1); - if (size > IPCMNI) - size = IPCMNI; + if (limit > ipc_mni) + limit = ipc_mni; - if (ids->in_use >= size) + if (ids->in_use >= limit) return -ENOSPC; idr_preload(GFP_KERNEL); spin_lock_init(&new->lock); - new->deleted = 0; rcu_read_lock(); spin_lock(&new->lock); - id = idr_alloc(&ids->ipcs_idr, new, - (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, - GFP_NOWAIT); - idr_preload_end(); - if (id < 0) { - spin_unlock(&new->lock); - rcu_read_unlock(); - return id; - } - - ids->in_use++; - current_euid_egid(&euid, &egid); new->cuid = new->uid = euid; new->gid = new->cgid = egid; - if (next_id < 0) { - new->seq = ids->seq++; - if (ids->seq > ids->seq_max) - ids->seq = 0; - } else { - new->seq = ipcid_to_seqx(next_id); - ids->next_id = -1; + new->deleted = false; + + idx = ipc_idr_alloc(ids, new); + idr_preload_end(); + + if (idx >= 0 && new->key != IPC_PRIVATE) { + err = rhashtable_insert_fast(&ids->key_ht, &new->khtnode, + ipc_kht_params); + if (err < 0) { + idr_remove(&ids->ipcs_idr, idx); + idx = err; + } + } + if (idx < 0) { + new->deleted = true; + spin_unlock(&new->lock); + rcu_read_unlock(); + return idx; } - new->id = ipc_buildid(id, new->seq); - return id; + ids->in_use++; + if (idx > ids->max_idx) + ids->max_idx = idx; + return idx; } /** - * ipcget_new - create a new ipc object - * @ns: namespace - * @ids: IPC identifer set - * @ops: the actual creation routine to call - * @params: its parameters - * - * This routine is called by sys_msgget, sys_semget() and sys_shmget() - * when the key is IPC_PRIVATE. 
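
[Example] The identifier handed back to userspace is composed at the end of ipc_idr_alloc() as (seq << ipcmni_seq_shift()) + idx. A userspace model of that layout with the default IPCMNI_SHIFT of 15 (constants mirrored from ipc/util.h):

#include <stdio.h>

#define SHIFT		15			/* IPCMNI_SHIFT */
#define IDX_MASK	((1 << SHIFT) - 1)

int main(void)
{
	int seq = 3, idx = 42;
	int id = (seq << SHIFT) + idx;	/* what ipc_idr_alloc() stores in new->id */

	printf("id=%d -> idx=%d seq=%d\n",
	       id, id & IDX_MASK, id >> SHIFT);	/* id=98346 -> idx=42 seq=3 */
	return 0;
}
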
+ * ipcget_new - create a new ipc object + * @ns: ipc namespace + * @ids: ipc identifier set + * @ops: the actual creation routine to call + * @params: its parameters + * + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is IPC_PRIVATE. */ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, - struct ipc_ops *ops, struct ipc_params *params) + const struct ipc_ops *ops, struct ipc_params *params) { int err; - down_write(&ids->rw_mutex); + down_write(&ids->rwsem); err = ops->getnew(ns, params); - up_write(&ids->rw_mutex); + up_write(&ids->rwsem); return err; } /** - * ipc_check_perms - check security and permissions for an IPC - * @ns: IPC namespace - * @ipcp: ipc permission set - * @ops: the actual security routine to call - * @params: its parameters + * ipc_check_perms - check security and permissions for an ipc object + * @ns: ipc namespace + * @ipcp: ipc permission set + * @ops: the actual security routine to call + * @params: its parameters * - * This routine is called by sys_msgget(), sys_semget() and sys_shmget() - * when the key is not IPC_PRIVATE and that key already exists in the - * ids IDR. + * This routine is called by sys_msgget(), sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE and that key already exists in the + * ds IDR. * - * On success, the IPC id is returned. + * On success, the ipc id is returned. * - * It is called with ipc_ids.rw_mutex and ipcp->lock held. + * It is called with ipc_ids.rwsem and ipcp->lock held. */ static int ipc_check_perms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, - struct ipc_ops *ops, + const struct ipc_ops *ops, struct ipc_params *params) { int err; @@ -352,21 +381,21 @@ static int ipc_check_perms(struct ipc_namespace *ns, } /** - * ipcget_public - get an ipc object or create a new one - * @ns: namespace - * @ids: IPC identifer set - * @ops: the actual creation routine to call - * @params: its parameters - * - * This routine is called by sys_msgget, sys_semget() and sys_shmget() - * when the key is not IPC_PRIVATE. - * It adds a new entry if the key is not found and does some permission - * / security checkings if the key is found. - * - * On success, the ipc id is returned. + * ipcget_public - get an ipc object or create a new one + * @ns: ipc namespace + * @ids: ipc identifier set + * @ops: the actual creation routine to call + * @params: its parameters + * + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE. + * It adds a new entry if the key is not found and does some permission + * / security checkings if the key is found. + * + * On success, the ipc id is returned. 
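
[Example] The create/associate split is visible from userspace: the second call below finds the key and takes the ipc_check_perms() path, so with IPC_EXCL it fails instead of creating. A runnable sketch (0x1234 is an arbitrary example key):

#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int a = shmget(0x1234, 4096, IPC_CREAT | 0600);		/* create-or-get */
	int b = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600); /* exclusive */

	printf("a=%d b=%d errno=%d (EEXIST=%d)\n", a, b, errno, EEXIST);
	if (a >= 0)
		shmctl(a, IPC_RMID, NULL);			/* clean up */
	return 0;
}
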
*/ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, - struct ipc_ops *ops, struct ipc_params *params) + const struct ipc_ops *ops, struct ipc_params *params) { struct kern_ipc_perm *ipcp; int flg = params->flg; @@ -376,7 +405,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, * Take the lock as a writer since we are potentially going to add * a new entry + read locks are not "upgradable" */ - down_write(&ids->rw_mutex); + down_write(&ids->rwsem); ipcp = ipc_findkey(ids, params->key); if (ipcp == NULL) { /* key not used */ @@ -402,135 +431,125 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, } ipc_unlock(ipcp); } - up_write(&ids->rw_mutex); + up_write(&ids->rwsem); return err; } - /** - * ipc_rmid - remove an IPC identifier - * @ids: IPC identifier set - * @ipcp: ipc perm structure containing the identifier to remove + * ipc_kht_remove - remove an ipc from the key hashtable + * @ids: ipc identifier set + * @ipcp: ipc perm structure containing the key to remove * - * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held - * before this function is called, and remain locked on the exit. + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held + * before this function is called, and remain locked on the exit. */ - -void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) +static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { - int lid = ipcid_to_idx(ipcp->id); - - idr_remove(&ids->ipcs_idr, lid); - - ids->in_use--; - - ipcp->deleted = 1; - - return; + if (ipcp->key != IPC_PRIVATE) + WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode, + ipc_kht_params)); } /** - * ipc_alloc - allocate ipc space - * @size: size desired + * ipc_search_maxidx - search for the highest assigned index + * @ids: ipc identifier set + * @limit: known upper limit for highest assigned index * - * Allocate memory from the appropriate pools and return a pointer to it. - * NULL is returned if the allocation fails + * The function determines the highest assigned index in @ids. It is intended + * to be called when ids->max_idx needs to be updated. + * Updating ids->max_idx is necessary when the current highest index ipc + * object is deleted. + * If no ipc object is allocated, then -1 is returned. + * + * ipc_ids.rwsem needs to be held by the caller. */ - -void *ipc_alloc(int size) +static int ipc_search_maxidx(struct ipc_ids *ids, int limit) { - void *out; - if(size > PAGE_SIZE) - out = vmalloc(size); - else - out = kmalloc(size, GFP_KERNEL); - return out; -} + int tmpidx; + int i; + int retval; -/** - * ipc_free - free ipc space - * @ptr: pointer returned by ipc_alloc - * @size: size of block - * - * Free a block created with ipc_alloc(). The caller must know the size - * used in the allocation call. - */ + i = ilog2(limit+1); -void ipc_free(void* ptr, int size) -{ - if(size > PAGE_SIZE) - vfree(ptr); - else - kfree(ptr); + retval = 0; + for (; i >= 0; i--) { + tmpidx = retval | (1<<i); + /* + * "0" is a possible index value, thus search using + * e.g. 15,7,3,1,0 instead of 16,8,4,2,1. 
+ */ + tmpidx = tmpidx-1; + if (idr_get_next(&ids->ipcs_idr, &tmpidx)) + retval |= (1<<i); + } + return retval - 1; } -struct ipc_rcu { - struct rcu_head rcu; - atomic_t refcount; -} ____cacheline_aligned_in_smp; - /** - * ipc_rcu_alloc - allocate ipc and rcu space - * @size: size desired + * ipc_rmid - remove an ipc identifier + * @ids: ipc identifier set + * @ipcp: ipc perm structure containing the identifier to remove * - * Allocate memory for the rcu header structure + the object. - * Returns the pointer to the object or NULL upon failure. + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held + * before this function is called, and remain locked on the exit. */ -void *ipc_rcu_alloc(int size) +void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { - /* - * We prepend the allocation with the rcu struct - */ - struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size); - if (unlikely(!out)) - return NULL; - atomic_set(&out->refcount, 1); - return out + 1; -} + int idx = ipcid_to_idx(ipcp->id); -int ipc_rcu_getref(void *ptr) -{ - struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; + WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp); + ipc_kht_remove(ids, ipcp); + ids->in_use--; + ipcp->deleted = true; - return atomic_inc_not_zero(&p->refcount); + if (unlikely(idx == ids->max_idx)) { + idx = ids->max_idx-1; + if (idx >= 0) + idx = ipc_search_maxidx(ids, idx); + ids->max_idx = idx; + } } /** - * ipc_schedule_free - free ipc + rcu space - * @head: RCU callback structure for queued work + * ipc_set_key_private - switch the key of an existing ipc to IPC_PRIVATE + * @ids: ipc identifier set + * @ipcp: ipc perm structure containing the key to modify + * + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held + * before this function is called, and remain locked on the exit. */ -static void ipc_schedule_free(struct rcu_head *head) +void ipc_set_key_private(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { - vfree(container_of(head, struct ipc_rcu, rcu)); + ipc_kht_remove(ids, ipcp); + ipcp->key = IPC_PRIVATE; } -void ipc_rcu_putref(void *ptr) +bool ipc_rcu_getref(struct kern_ipc_perm *ptr) { - struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; + return refcount_inc_not_zero(&ptr->refcount); +} - if (!atomic_dec_and_test(&p->refcount)) +void ipc_rcu_putref(struct kern_ipc_perm *ptr, + void (*func)(struct rcu_head *head)) +{ + if (!refcount_dec_and_test(&ptr->refcount)) return; - if (is_vmalloc_addr(ptr)) { - call_rcu(&p->rcu, ipc_schedule_free); - } else { - kfree_rcu(p, rcu); - } + call_rcu(&ptr->rcu, func); } /** - * ipcperms - check IPC permissions - * @ns: IPC namespace - * @ipcp: IPC permission set - * @flag: desired permission set. + * ipcperms - check ipc permissions + * @ns: ipc namespace + * @ipcp: ipc permission set + * @flag: desired permission set * - * Check user, group, other permissions for access - * to ipc resources. return 0 if allowed + * Check user, group, other permissions for access + * to ipc resources. return 0 if allowed * - * @flag will most probably be 0 or S_...UGO from <linux/stat.h> + * @flag will most probably be 0 or ``S_...UGO`` from <linux/stat.h> */ - int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) { kuid_t euid = current_euid(); @@ -545,7 +564,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) granted_mode >>= 3; /* is there some bit set in requested_mode but not in granted_mode? 
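
[Example] The descending-power-of-two probe in ipc_search_maxidx() is easiest to see on a toy bitmap. A userspace model where exists_ge() stands in for idr_get_next(); with indices 0, 3 and 9 allocated and limit 15, the probes land on 15, 7, 11, 9, 10 and recover 9:

#include <stdio.h>

/* "is any index >= idx allocated?", the role idr_get_next() plays */
static int exists_ge(unsigned long long map, int idx)
{
	return idx < 64 && (map >> idx) != 0;
}

static int search_maxidx(unsigned long long map, int limit)
{
	int i = 0, retval = 0;
	unsigned v = limit + 1;

	while (v >>= 1)			/* i = ilog2(limit + 1) */
		i++;
	for (; i >= 0; i--) {
		int tmpidx = (retval | (1 << i)) - 1;
		if (exists_ge(map, tmpidx))
			retval |= 1 << i;
	}
	return retval - 1;
}

int main(void)
{
	unsigned long long map = 1ULL << 0 | 1ULL << 3 | 1ULL << 9;

	printf("max idx = %d\n", search_maxidx(map, 15));	/* prints 9 */
	return 0;
}
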
*/ - if ((requested_mode & ~granted_mode & 0007) && + if ((requested_mode & ~granted_mode & 0007) && !ns_capable(ns->user_ns, CAP_IPC_OWNER)) return -1; @@ -558,16 +577,14 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) */ /** - * kernel_to_ipc64_perm - convert kernel ipc permissions to user - * @in: kernel permissions - * @out: new style IPC permissions + * kernel_to_ipc64_perm - convert kernel ipc permissions to user + * @in: kernel permissions + * @out: new style ipc permissions * - * Turn the kernel object @in into a set of permissions descriptions - * for returning to userspace (@out). + * Turn the kernel object @in into a set of permissions descriptions + * for returning to userspace (@out). */ - - -void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) +void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out) { out->key = in->key; out->uid = from_kuid_munged(current_user_ns(), in->uid); @@ -579,15 +596,14 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) } /** - * ipc64_perm_to_ipc_perm - convert new ipc permissions to old - * @in: new style IPC permissions - * @out: old style IPC permissions + * ipc64_perm_to_ipc_perm - convert new ipc permissions to old + * @in: new style ipc permissions + * @out: old style ipc permissions * - * Turn the new style permissions object @in into a compatibility - * object and store it into the @out pointer. + * Turn the new style permissions object @in into a compatibility + * object and store it into the @out pointer. */ - -void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) +void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) { out->key = in->key; SET_UID(out->uid, in->uid); @@ -599,21 +615,20 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) } /** - * ipc_obtain_object + * ipc_obtain_object_idr - Look for an id in the ipc ids idr and + * return associated ipc object. * @ids: ipc identifier set * @id: ipc id to look for * - * Look for an id in the ipc ids idr and return associated ipc object. - * * Call inside the RCU critical section. * The ipc object is *not* locked on exit. */ -struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) +struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id) { struct kern_ipc_perm *out; - int lid = ipcid_to_idx(id); + int idx = ipcid_to_idx(id); - out = idr_find(&ids->ipcs_idr, lid); + out = idr_find(&ids->ipcs_idr, idx); if (!out) return ERR_PTR(-EINVAL); @@ -621,90 +636,39 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) } /** - * ipc_lock - Lock an ipc structure without rw_mutex held - * @ids: IPC identifier set - * @id: ipc id to look for - * - * Look for an id in the ipc ids idr and lock the associated ipc object. - * - * The ipc object is locked on successful exit. - */ -struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) -{ - struct kern_ipc_perm *out; - - rcu_read_lock(); - out = ipc_obtain_object(ids, id); - if (IS_ERR(out)) - goto err1; - - spin_lock(&out->lock); - - /* ipc_rmid() may have already freed the ID while ipc_lock - * was spinning: here verify that the structure is still valid - */ - if (!out->deleted) - return out; - - spin_unlock(&out->lock); - out = ERR_PTR(-EINVAL); -err1: - rcu_read_unlock(); - return out; -} - -/** - * ipc_obtain_object_check + * ipc_obtain_object_check - Similar to ipc_obtain_object_idr() but + * also checks the ipc object sequence number. 
* @ids: ipc identifier set * @id: ipc id to look for * - * Similar to ipc_obtain_object() but also checks - * the ipc object reference counter. - * * Call inside the RCU critical section. * The ipc object is *not* locked on exit. */ struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) { - struct kern_ipc_perm *out = ipc_obtain_object(ids, id); + struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id); if (IS_ERR(out)) goto out; if (ipc_checkid(out, id)) - return ERR_PTR(-EIDRM); + return ERR_PTR(-EINVAL); out: return out; } -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id) -{ - struct kern_ipc_perm *out; - - out = ipc_lock(ids, id); - if (IS_ERR(out)) - return out; - - if (ipc_checkid(out, id)) { - ipc_unlock(out); - return ERR_PTR(-EIDRM); - } - - return out; -} - /** * ipcget - Common sys_*get() code - * @ns : namsepace - * @ids : IPC identifier set - * @ops : operations to be called on ipc object creation, permission checks - * and further checks - * @params : the parameters needed by the previous operations. + * @ns: namespace + * @ids: ipc identifier set + * @ops: operations to be called on ipc object creation, permission checks + * and further checks + * @params: the parameters needed by the previous operations. * * Common routine called by sys_msgget(), sys_semget() and sys_shmget(). */ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, - struct ipc_ops *ops, struct ipc_params *params) + const struct ipc_ops *ops, struct ipc_params *params) { if (params->key == IPC_PRIVATE) return ipcget_new(ns, ids, ops, params); @@ -713,7 +677,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, } /** - * ipc_update_perm - update the permissions of an IPC. + * ipc_update_perm - update the permissions of an ipc object * @in: the permission given as input. * @out: the permission of the ipc to set. */ @@ -733,8 +697,8 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) } /** - * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd - * @ns: the ipc namespace + * ipcctl_obtain_check - retrieve an ipc object and check permissions + * @ns: ipc namespace * @ids: the table of ids where to look for the ipc * @id: the id of the ipc to retrieve * @cmd: the cmd to check @@ -743,32 +707,18 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) * * This function does some common audit and permissions check for some IPC_XXX * cmd and is called from semctl_down, shmctl_down and msgctl_down. - * It must be called without any lock held and - * - retrieves the ipc with the given id in the given table. - * - performs some audit and permission check, depending on the given cmd - * - returns the ipc with the ipc lock held in case of success - * or an err-code without any lock held otherwise. * - * Call holding the both the rw_mutex and the rcu read lock. + * It: + * - retrieves the ipc object with the given id in the given table. + * - performs some audit and permission check, depending on the given cmd + * - returns a pointer to the ipc object or otherwise, the corresponding + * error. + * + * Call holding the both the rwsem and the rcu read lock. 
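
[Example] The sequence check in ipc_checkid() is what invalidates stale ids after IPC_RMID, even if the slot index is later recycled; note the hunk above now reports -EINVAL where it used to report -EIDRM. Runnable:

#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds buf;
	int id = msgget(IPC_PRIVATE, 0600);

	msgctl(id, IPC_RMID, NULL);		/* the queue is gone ... */
	if (msgctl(id, IPC_STAT, &buf) < 0)	/* ... so the id is stale */
		printf("errno=%d (EINVAL=%d, EIDRM=%d)\n",
		       errno, EINVAL, EIDRM);
	return 0;
}
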
*/ -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns, - struct ipc_ids *ids, int id, int cmd, - struct ipc64_perm *perm, int extra_perm) -{ - struct kern_ipc_perm *ipcp; - - ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm); - if (IS_ERR(ipcp)) - goto out; - - spin_lock(&ipcp->lock); -out: - return ipcp; -} - -struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, - struct ipc_ids *ids, int id, int cmd, - struct ipc64_perm *perm, int extra_perm) +struct kern_ipc_perm *ipcctl_obtain_check(struct ipc_namespace *ns, + struct ipc_ids *ids, int id, int cmd, + struct ipc64_perm *perm, int extra_perm) { kuid_t euid; int err = -EPERM; @@ -797,15 +747,14 @@ err: /** - * ipc_parse_version - IPC call version - * @cmd: pointer to command + * ipc_parse_version - ipc call version + * @cmd: pointer to command * - * Return IPC_64 for new style IPC and IPC_OLD for old style IPC. - * The @cmd value is turned from an encoding command and version into - * just the command code. + * Return IPC_64 for new style IPC and IPC_OLD for old style IPC. + * The @cmd value is turned from an encoding command and version into + * just the command code. */ - -int ipc_parse_version (int *cmd) +int ipc_parse_version(int *cmd) { if (*cmd & IPC_64) { *cmd ^= IPC_64; @@ -820,39 +769,48 @@ int ipc_parse_version (int *cmd) #ifdef CONFIG_PROC_FS struct ipc_proc_iter { struct ipc_namespace *ns; + struct pid_namespace *pid_ns; struct ipc_proc_iface *iface; }; -/* - * This routine locks the ipc structure found at least at position pos. +struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s) +{ + struct ipc_proc_iter *iter = s->private; + return iter->pid_ns; +} + +/** + * sysvipc_find_ipc - Find and lock the ipc structure based on seq pos + * @ids: ipc identifier set + * @pos: expected position + * + * The function finds an ipc structure, based on the sequence file + * position @pos. If there is no ipc structure at position @pos, then + * the successor is selected. + * If a structure is found, then it is locked (both rcu_read_lock() and + * ipc_lock_object()) and @pos is set to the position needed to locate + * the found ipc structure. + * If nothing is found (i.e. EOF), @pos is not modified. + * + * The function returns the found ipc structure, or NULL at EOF. 
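
[Example] sysvipc_find_ipc() below feeds the /proc/sysvipc/* seq_files: position 0 yields the banner header, position n the object at (or after) idr index n-1. A plain sequential read walks exactly that sequence; runnable:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/sysvipc/shm", "r");	/* likewise msg, sem */

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))		/* header, then rows */
		fputs(line, stdout);
	fclose(f);
	return 0;
}
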
*/ -static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, - loff_t *new_pos) +static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t *pos) { + int tmpidx; struct kern_ipc_perm *ipc; - int total, id; - total = 0; - for (id = 0; id < pos && total < ids->in_use; id++) { - ipc = idr_find(&ids->ipcs_idr, id); - if (ipc != NULL) - total++; - } + /* convert from position to idr index -> "-1" */ + tmpidx = *pos - 1; - if (total >= ids->in_use) - return NULL; + ipc = idr_get_next(&ids->ipcs_idr, &tmpidx); + if (ipc != NULL) { + rcu_read_lock(); + ipc_lock_object(ipc); - for ( ; pos < IPCMNI; pos++) { - ipc = idr_find(&ids->ipcs_idr, pos); - if (ipc != NULL) { - *new_pos = pos + 1; - ipc_lock_by_ptr(ipc); - return ipc; - } + /* convert from idr index to position -> "+1" */ + *pos = tmpidx + 1; } - - /* Out of range - return NULL to terminate iteration */ - return NULL; + return ipc; } static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) @@ -865,11 +823,13 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) if (ipc && ipc != SEQ_START_TOKEN) ipc_unlock(ipc); - return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos); + /* Next -> search for *pos+1 */ + (*pos)++; + return sysvipc_find_ipc(&iter->ns->ids[iface->ids], pos); } /* - * File positions: pos 0 -> header, pos n -> ipc id = n - 1. + * File positions: pos 0 -> header, pos n -> ipc idx = n - 1. * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START. */ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) @@ -884,7 +844,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) * Take the lock - this will be released by the corresponding * call to stop(). */ - down_read(&ids->rw_mutex); + down_read(&ids->rwsem); /* pos < 0 is invalid */ if (*pos < 0) @@ -894,8 +854,8 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - /* Find the (pos-1)th ipc */ - return sysvipc_find_ipc(ids, *pos - 1, pos); + /* Otherwise return the correct ipc structure */ + return sysvipc_find_ipc(ids, pos); } static void sysvipc_proc_stop(struct seq_file *s, void *it) @@ -911,7 +871,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it) ids = &iter->ns->ids[iface->ids]; /* Release the lock we took in start() */ - up_read(&ids->rw_mutex); + up_read(&ids->rwsem); } static int sysvipc_proc_show(struct seq_file *s, void *it) @@ -919,8 +879,10 @@ static int sysvipc_proc_show(struct seq_file *s, void *it) struct ipc_proc_iter *iter = s->private; struct ipc_proc_iface *iface = iter->iface; - if (it == SEQ_START_TOKEN) - return seq_puts(s, iface->header); + if (it == SEQ_START_TOKEN) { + seq_puts(s, iface->header); + return 0; + } return iface->show(s, it); } @@ -934,29 +896,17 @@ static const struct seq_operations sysvipc_proc_seqops = { static int sysvipc_proc_open(struct inode *inode, struct file *file) { - int ret; - struct seq_file *seq; struct ipc_proc_iter *iter; - ret = -ENOMEM; - iter = kmalloc(sizeof(*iter), GFP_KERNEL); + iter = __seq_open_private(file, &sysvipc_proc_seqops, sizeof(*iter)); if (!iter) - goto out; + return -ENOMEM; - ret = seq_open(file, &sysvipc_proc_seqops); - if (ret) - goto out_kfree; - - seq = file->private_data; - seq->private = iter; - - iter->iface = PDE_DATA(inode); + iter->iface = pde_data(inode); iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); -out: - return ret; -out_kfree: - kfree(iter); - goto out; + iter->pid_ns = 
get_pid_ns(task_active_pid_ns(current)); + + return 0; } static int sysvipc_proc_release(struct inode *inode, struct file *file) @@ -964,13 +914,15 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file) struct seq_file *seq = file->private_data; struct ipc_proc_iter *iter = seq->private; put_ipc_ns(iter->ns); + put_pid_ns(iter->pid_ns); return seq_release_private(inode, file); } -static const struct file_operations sysvipc_proc_fops = { - .open = sysvipc_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = sysvipc_proc_release, +static const struct proc_ops sysvipc_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_open = sysvipc_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = sysvipc_proc_release, }; #endif /* CONFIG_PROC_FS */ diff --git a/ipc/util.h b/ipc/util.h index b6a6a88f3002..a55d6cebe6d3 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/ipc/util.h * Copyright (C) 1999 Christoph Rohland @@ -12,26 +13,57 @@ #include <linux/unistd.h> #include <linux/err.h> +#include <linux/ipc_namespace.h> +#include <linux/pid.h> -#define SEQ_MULTIPLIER (IPCMNI) +/* + * The IPC ID contains 2 separate numbers - index and sequence number. + * By default, + * bits 0-14: index (32k, 15 bits) + * bits 15-30: sequence number (64k, 16 bits) + * + * When IPCMNI extension mode is turned on, the composition changes: + * bits 0-23: index (16M, 24 bits) + * bits 24-30: sequence number (128, 7 bits) + */ +#define IPCMNI_SHIFT 15 +#define IPCMNI_EXTEND_SHIFT 24 +#define IPCMNI_EXTEND_MIN_CYCLE (RADIX_TREE_MAP_SIZE * RADIX_TREE_MAP_SIZE) +#define IPCMNI (1 << IPCMNI_SHIFT) +#define IPCMNI_EXTEND (1 << IPCMNI_EXTEND_SHIFT) + +#ifdef CONFIG_SYSVIPC_SYSCTL +extern int ipc_mni; +extern int ipc_mni_shift; +extern int ipc_min_cycle; + +#define ipcmni_seq_shift() ipc_mni_shift +#define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1) + +#else /* CONFIG_SYSVIPC_SYSCTL */ -void sem_init (void); -void msg_init (void); -void shm_init (void); +#define ipc_mni IPCMNI +#define ipc_min_cycle ((int)RADIX_TREE_MAP_SIZE) +#define ipcmni_seq_shift() IPCMNI_SHIFT +#define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1) +#endif /* CONFIG_SYSVIPC_SYSCTL */ + +void sem_init(void); +void msg_init(void); +void shm_init(void); struct ipc_namespace; +struct pid_namespace; #ifdef CONFIG_POSIX_MQUEUE extern void mq_clear_sbinfo(struct ipc_namespace *ns); -extern void mq_put_mnt(struct ipc_namespace *ns); #else static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { } -static inline void mq_put_mnt(struct ipc_namespace *ns) { } #endif #ifdef CONFIG_SYSVIPC void sem_init_ns(struct ipc_namespace *ns); -void msg_init_ns(struct ipc_namespace *ns); +int msg_init_ns(struct ipc_namespace *ns); void shm_init_ns(struct ipc_namespace *ns); void sem_exit_ns(struct ipc_namespace *ns); @@ -39,7 +71,7 @@ void msg_exit_ns(struct ipc_namespace *ns); void shm_exit_ns(struct ipc_namespace *ns); #else static inline void sem_init_ns(struct ipc_namespace *ns) { } -static inline void msg_init_ns(struct ipc_namespace *ns) { } +static inline int msg_init_ns(struct ipc_namespace *ns) { return 0; } static inline void shm_init_ns(struct ipc_namespace *ns) { } static inline void sem_exit_ns(struct ipc_namespace *ns) { } @@ -71,18 +103,19 @@ struct ipc_params { * . 
routine to call for an extra check if needed */ struct ipc_ops { - int (*getnew) (struct ipc_namespace *, struct ipc_params *); - int (*associate) (struct kern_ipc_perm *, int); - int (*more_checks) (struct kern_ipc_perm *, struct ipc_params *); + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); }; struct seq_file; struct ipc_ids; -void ipc_init_ids(struct ipc_ids *); +void ipc_init_ids(struct ipc_ids *ids); #ifdef CONFIG_PROC_FS void __init ipc_init_proc_interface(const char *path, const char *header, int ids, int (*show)(struct seq_file *, void *)); +struct pid_namespace *ipc_seq_pid_ns(struct seq_file *); #else #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) #endif @@ -91,72 +124,85 @@ void __init ipc_init_proc_interface(const char *path, const char *header, #define IPC_MSG_IDS 1 #define IPC_SHM_IDS 2 -#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) -#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) +#define ipcid_to_idx(id) ((id) & IPCMNI_IDX_MASK) +#define ipcid_to_seqx(id) ((id) >> ipcmni_seq_shift()) +#define ipcid_seq_max() (INT_MAX >> ipcmni_seq_shift()) -/* must be called with ids->rw_mutex acquired for writing */ +/* must be called with ids->rwsem acquired for writing */ int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); -/* must be called with ids->rw_mutex acquired for reading */ -int ipc_get_maxid(struct ipc_ids *); - /* must be called with both locks acquired. */ void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); +/* must be called with both locks acquired. */ +void ipc_set_key_private(struct ipc_ids *, struct kern_ipc_perm *); + /* must be called with ipcp locked */ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); -/* for rare, potentially huge allocations. - * both function can sleep +/** + * ipc_get_maxidx - get the highest assigned index + * @ids: ipc identifier set + * + * The function returns the highest assigned index for @ids. The function + * doesn't scan the idr tree, it uses a cached value. + * + * Called with ipc_ids.rwsem held for reading. */ -void* ipc_alloc(int size); -void ipc_free(void* ptr, int size); +static inline int ipc_get_maxidx(struct ipc_ids *ids) +{ + if (ids->in_use == 0) + return -1; + + if (ids->in_use == ipc_mni) + return ipc_mni - 1; + + return ids->max_idx; +} /* * For allocation that need to be freed by RCU. * Objects are reference counted, they start with reference count 1. * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. + * + * refcount is initialized by ipc_addid(), before that point call_rcu() + * must be used. 
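
[Example] A minimal sketch of that refcounting contract, with hypothetical example_* names, modeled on the sleep-then-relock pattern used by sem.c: pin the object before dropping the lock for sleeping work, then re-lock, drop the pin, and re-check deletion. Real callers free the containing sem/msg/shm structure, not the bare kern_ipc_perm:

static void example_free(struct rcu_head *head)
{
	kvfree(container_of(head, struct kern_ipc_perm, rcu));
}

/* called with perm locked and rcu_read_lock() held */
static int example_sleep_then_relock(struct kern_ipc_perm *perm)
{
	int err = 0;

	if (!ipc_rcu_getref(perm))		/* refcount already hit 0 */
		return -EIDRM;
	ipc_unlock_object(perm);
	rcu_read_unlock();

	/* ... sleeping work: GFP_KERNEL allocation, copy_from_user() ... */

	rcu_read_lock();
	ipc_lock_object(perm);
	ipc_rcu_putref(perm, example_free);	/* drop the pin */
	if (!ipc_valid_object(perm))		/* deleted while we slept */
		err = -EIDRM;
	/* caller continues under the lock, or unwinds on error */
	return err;
}
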
*/ -void* ipc_rcu_alloc(int size); -int ipc_rcu_getref(void *ptr); -void ipc_rcu_putref(void *ptr); +bool ipc_rcu_getref(struct kern_ipc_perm *ptr); +void ipc_rcu_putref(struct kern_ipc_perm *ptr, + void (*func)(struct rcu_head *head)); -struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); -struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); +struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); -struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, +struct kern_ipc_perm *ipcctl_obtain_check(struct ipc_namespace *ns, struct ipc_ids *ids, int id, int cmd, struct ipc64_perm *perm, int extra_perm); -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns, - struct ipc_ids *ids, int id, int cmd, - struct ipc64_perm *perm, int extra_perm); -#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION - /* On IA-64, we always use the "64-bit version" of the IPC structures. */ -# define ipc_parse_version(cmd) IPC_64 -#else -int ipc_parse_version (int *cmd); +static inline void ipc_update_pid(struct pid **pos, struct pid *pid) +{ + struct pid *old = *pos; + if (old != pid) { + *pos = get_pid(pid); + put_pid(old); + } +} + +#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +int ipc_parse_version(int *cmd); #endif extern void free_msg(struct msg_msg *msg); -extern struct msg_msg *load_msg(const void __user *src, int len); +extern struct msg_msg *load_msg(const void __user *src, size_t len); extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst); -extern int store_msg(void __user *dest, struct msg_msg *msg, int len); - -extern void recompute_msgmni(struct ipc_namespace *); - -static inline int ipc_buildid(int id, int seq) -{ - return SEQ_MULTIPLIER * seq + id; -} +extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len); -static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid) +static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int id) { - return uid / SEQ_MULTIPLIER != ipcp->seq; + return ipcid_to_seqx(id) != ipcp->seq; } static inline void ipc_lock_object(struct kern_ipc_perm *perm) @@ -174,22 +220,73 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm) assert_spin_locked(&perm->lock); } -static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm) -{ - rcu_read_lock(); - ipc_lock_object(perm); -} - static inline void ipc_unlock(struct kern_ipc_perm *perm) { ipc_unlock_object(perm); rcu_read_unlock(); } -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); +/* + * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths + * where the respective ipc_ids.rwsem is not being held down. + * Checks whether the ipc object is still around or if it's gone already, as + * ipc_rmid() may have already freed the ID while the ipc lock was spinning. + * Needs to be called with kern_ipc_perm.lock held -- exception made for one + * checkpoint case at sys_semtimedop() as noted in code commentary. 
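
[Example] ipc_update_pid() above is how the mechanisms cache their "last operator" pids; a typical call site, as in the msgsnd path of ipc/msg.c:

	ipc_update_pid(&msq->q_lspid, task_tgid(current));

get_pid()/put_pid() only run when the pid actually changes, which keeps the repeated-sender fast path free of refcount traffic.
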
+ */
+static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
+{
+	return !perm->deleted;
+}
+
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
-			struct ipc_ops *ops, struct ipc_params *params);
+			const struct ipc_ops *ops, struct ipc_params *params);
 void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
 		void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));
+
+static inline int sem_check_semmni(struct ipc_namespace *ns) {
+	/*
+	 * Check semmni range [0, ipc_mni]
+	 * semmni is the last element of sem_ctls[4] array
+	 */
+	return ((ns->sem_ctls[3] < 0) || (ns->sem_ctls[3] > ipc_mni))
+		? -ERANGE : 0;
+}
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_ipc_perm {
+	key_t key;
+	__compat_uid_t uid;
+	__compat_gid_t gid;
+	__compat_uid_t cuid;
+	__compat_gid_t cgid;
+	compat_mode_t mode;
+	unsigned short seq;
+};
+
+void to_compat_ipc_perm(struct compat_ipc_perm *, struct ipc64_perm *);
+void to_compat_ipc64_perm(struct compat_ipc64_perm *, struct ipc64_perm *);
+int get_compat_ipc_perm(struct ipc64_perm *, struct compat_ipc_perm __user *);
+int get_compat_ipc64_perm(struct ipc64_perm *,
+			  struct compat_ipc64_perm __user *);
+
+static inline int compat_ipc_parse_version(int *cmd)
+{
+	int version = *cmd & IPC_64;
+	*cmd &= ~IPC_64;
+	return version;
+}
+
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
+long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
+long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
+			compat_long_t msgtyp, int msgflg);
+long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
+			compat_ssize_t msgsz, int msgflg);
+long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
+
+#endif
+
 #endif
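
[Example] sem_check_semmni() above guards the fourth field of the kernel.sem sysctl; writes outside [0, ipc_mni] fail with ERANGE. A runnable way to read the values it validates (the file carries SEMMSL, SEMMNS, SEMOPM, SEMMNI in that order):

#include <stdio.h>

int main(void)
{
	unsigned semmsl, semmns, semopm, semmni;
	FILE *f = fopen("/proc/sys/kernel/sem", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u %u %u %u", &semmsl, &semmns, &semopm, &semmni) == 4)
		printf("semmsl=%u semmns=%u semopm=%u semmni=%u\n",
		       semmsl, semmns, semopm, semmni);
	fclose(f);
	return 0;
}
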
