Diffstat (limited to 'ipc/mqueue.c')
-rw-r--r--  ipc/mqueue.c  1234
1 file changed, 729 insertions, 505 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index ae1996d3c539..c4f6d65596cf 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -6,7 +6,7 @@ * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: - * Manfred Spraul (manfred@colorfullife.com) + * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * @@ -18,6 +18,7 @@ #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> @@ -35,10 +36,18 @@ #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> +#include <linux/sched/wake_q.h> +#include <linux/sched/signal.h> +#include <linux/sched/user.h> #include <net/sock.h> #include "util.h" +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; + bool newns; /* Set if newly created ipc namespace */ +}; + #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 @@ -47,8 +56,7 @@ #define RECV 1 #define STATE_NONE 0 -#define STATE_PENDING 1 -#define STATE_READY 2 +#define STATE_READY 1 struct posix_msg_tree_node { struct rb_node rb_node; @@ -56,6 +64,66 @@ struct posix_msg_tree_node { int priority; }; +/* + * Locking: + * + * Accesses to a message queue are synchronized by acquiring info->lock. + * + * There are two notable exceptions: + * - The actual wakeup of a sleeping task is performed using the wake_q + * framework. info->lock is already released when wake_up_q is called. + * - The exit codepaths after sleeping check ext_wait_queue->state without + * any locks. If it is STATE_READY, then the syscall is completed without + * acquiring info->lock. + * + * MQ_BARRIER: + * To achieve proper release/acquire memory barrier pairing, the state is set to + * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed + * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used. + * + * This prevents the following races: + * + * 1) With the simple wake_q_add(), the task could be gone already before + * the increase of the reference happens + * Thread A + * Thread B + * WRITE_ONCE(wait.state, STATE_NONE); + * schedule_hrtimeout() + * wake_q_add(A) + * if (cmpxchg()) // success + * ->state = STATE_READY (reordered) + * <timeout returns> + * if (wait.state == STATE_READY) return; + * sysret to user space + * sys_exit() + * get_task_struct() // UaF + * + * Solution: Use wake_q_add_safe() and perform the get_task_struct() before + * the smp_store_release() that does ->state = STATE_READY. + * + * 2) Without proper _release/_acquire barriers, the woken up task + * could read stale data + * + * Thread A + * Thread B + * do_mq_timedreceive + * WRITE_ONCE(wait.state, STATE_NONE); + * schedule_hrtimeout() + * state = STATE_READY; + * <timeout returns> + * if (wait.state == STATE_READY) return; + * msg_ptr = wait.msg; // Access to stale data! + * receiver->msg = message; (reordered) + * + * Solution: use _release and _acquire barriers. + * + * 3) There is intentionally no barrier when setting current->state + * to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the + * release memory barrier, and the wakeup is triggered when holding + * info->lock, i.e. spin_lock(&info->lock) provided a pairing + * acquire memory barrier. 
+ */ + struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; @@ -69,13 +137,15 @@ struct mqueue_inode_info { wait_queue_head_t wait_q; struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; - struct pid* notify_owner; + struct pid *notify_owner; + u32 notify_self_exec_id; struct user_namespace *notify_user_ns; - struct user_struct *user; /* user who created, for accounting */ + struct ucounts *ucounts; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -85,15 +155,15 @@ struct mqueue_inode_info { unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; +static struct file_system_type mqueue_fs_type; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct super_operations mqueue_super_ops; +static const struct fs_context_operations mqueue_fs_context_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; -static struct ctl_table_header * mq_sysctl_table; - static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); @@ -122,6 +192,7 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; + bool rightmost = true; p = &info->msg_tree.rb_node; while (*p) { @@ -130,9 +201,10 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (likely(leaf->priority == msg->m_type)) goto insert_msg; - else if (msg->m_type < leaf->priority) + else if (msg->m_type < leaf->priority) { p = &(*p)->rb_left; - else + rightmost = false; + } else p = &(*p)->rb_right; } if (info->node_cache) { @@ -143,9 +215,12 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); - info->qsize += sizeof(*leaf); } leaf->priority = msg->m_type; + + if (rightmost) + info->msg_tree_rightmost = &leaf->rb_node; + rb_link_node(&leaf->rb_node, parent, p); rb_insert_color(&leaf->rb_node, &info->msg_tree); insert_msg: @@ -155,23 +230,34 @@ insert_msg: return 0; } +static inline void msg_tree_erase(struct posix_msg_tree_node *leaf, + struct mqueue_inode_info *info) +{ + struct rb_node *node = &leaf->rb_node; + + if (info->msg_tree_rightmost == node) + info->msg_tree_rightmost = rb_prev(node); + + rb_erase(node, &info->msg_tree); + if (info->node_cache) + kfree(leaf); + else + info->node_cache = leaf; +} + static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { - struct rb_node **p, *parent = NULL; + struct rb_node *parent = NULL; struct posix_msg_tree_node *leaf; struct msg_msg *msg; try_again: - p = &info->msg_tree.rb_node; - while (*p) { - parent = *p; - /* - * During insert, low priorities go to the left and high to the - * right. On receive, we want the highest priorities first, so - * walk all the way to the right. - */ - p = &(*p)->rb_right; - } + /* + * During insert, low priorities go to the left and high to the + * right. On receive, we want the highest priorities first, so + * walk all the way to the right. 
+ */ + parent = info->msg_tree_rightmost; if (!parent) { if (info->attr.mq_curmsgs) { pr_warn_once("Inconsistency in POSIX message queue, " @@ -186,26 +272,14 @@ try_again: pr_warn_once("Inconsistency in POSIX message queue, " "empty leaf node but we haven't implemented " "lazy leaf delete!\n"); - rb_erase(&leaf->rb_node, &info->msg_tree); - if (info->node_cache) { - info->qsize -= sizeof(*leaf); - kfree(leaf); - } else { - info->node_cache = leaf; - } + msg_tree_erase(leaf, info); goto try_again; } else { msg = list_first_entry(&leaf->msg_list, struct msg_msg, m_list); list_del(&msg->m_list); if (list_empty(&leaf->msg_list)) { - rb_erase(&leaf->rb_node, &info->msg_tree); - if (info->node_cache) { - info->qsize -= sizeof(*leaf); - kfree(leaf); - } else { - info->node_cache = leaf; - } + msg_tree_erase(leaf, info); } } info->attr.mq_curmsgs--; @@ -217,7 +291,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { - struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; @@ -229,7 +302,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; + simple_inode_init_ts(inode); if (S_ISREG(mode)) { struct mqueue_inode_info *info; @@ -246,8 +319,9 @@ static struct inode *mqueue_get_inode(struct super_block *sb, info->notify_owner = NULL; info->notify_user_ns = NULL; info->qsize = 0; - info->user = NULL; /* set when all is ok */ + info->ucounts = NULL; /* set when all is ok */ info->msg_tree = RB_ROOT; + info->msg_tree_rightmost = NULL; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, @@ -271,26 +345,47 @@ static struct inode *mqueue_get_inode(struct super_block *sb, * that means the min(mq_maxmsg, max_priorities) * struct * posix_msg_tree_node. 
*/ + + ret = -EINVAL; + if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0) + goto out_inode; + if (capable(CAP_SYS_RESOURCE)) { + if (info->attr.mq_maxmsg > HARD_MSGMAX || + info->attr.mq_msgsize > HARD_MSGSIZEMAX) + goto out_inode; + } else { + if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max || + info->attr.mq_msgsize > ipc_ns->mq_msgsize_max) + goto out_inode; + } + ret = -EOVERFLOW; + /* check for overflow */ + if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg) + goto out_inode; mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); - - mq_bytes = mq_treesize + (info->attr.mq_maxmsg * - info->attr.mq_msgsize); - - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { - spin_unlock(&mq_lock); - /* mqueue_evict_inode() releases info->messages */ - ret = -EMFILE; + mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize; + if (mq_bytes + mq_treesize < mq_bytes) goto out_inode; + mq_bytes += mq_treesize; + info->ucounts = get_ucounts(current_ucounts()); + if (info->ucounts) { + long msgqueue; + + spin_lock(&mq_lock); + msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); + if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) { + dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); + spin_unlock(&mq_lock); + put_ucounts(info->ucounts); + info->ucounts = NULL; + /* mqueue_evict_inode() releases info->messages */ + ret = -EMFILE; + goto out_inode; + } + spin_unlock(&mq_lock); } - u->mq_bytes += mq_bytes; - spin_unlock(&mq_lock); - - /* all is ok */ - info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ @@ -306,15 +401,17 @@ err: return ERR_PTR(ret); } -static int mqueue_fill_super(struct super_block *sb, void *data, int silent) +static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; - struct ipc_namespace *ns = data; + struct ipc_namespace *ns = sb->s_fs_info; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; + sb->s_d_flags = DCACHE_DONTCACHE; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) @@ -326,26 +423,74 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent) return 0; } -static struct dentry *mqueue_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) +static int mqueue_get_tree(struct fs_context *fc) { - if (!(flags & MS_KERNMOUNT)) { - struct ipc_namespace *ns = current->nsproxy->ipc_ns; - /* Don't allow mounting unless the caller has CAP_SYS_ADMIN - * over the ipc namespace. - */ - if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); + struct mqueue_fs_context *ctx = fc->fs_private; - data = ns; + /* + * With a newly created ipc namespace, we don't need to do a search + * for an ipc namespace match, but we still need to set s_fs_info. 
+ */ + if (ctx->newns) { + fc->s_fs_info = ctx->ipc_ns; + return get_tree_nodev(fc, mqueue_fill_super); } - return mount_ns(fs_type, flags, data, mqueue_fill_super); + return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns); +} + +static void mqueue_fs_context_free(struct fs_context *fc) +{ + struct mqueue_fs_context *ctx = fc->fs_private; + + put_ipc_ns(ctx->ipc_ns); + kfree(ctx); +} + +static int mqueue_init_fs_context(struct fs_context *fc) +{ + struct mqueue_fs_context *ctx; + + ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns); + fc->fs_private = ctx; + fc->ops = &mqueue_fs_context_ops; + return 0; +} + +/* + * mq_init_ns() is currently the only caller of mq_create_mount(). + * So the ns parameter is always a newly created ipc namespace. + */ +static struct vfsmount *mq_create_mount(struct ipc_namespace *ns) +{ + struct mqueue_fs_context *ctx; + struct fs_context *fc; + struct vfsmount *mnt; + + fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT); + if (IS_ERR(fc)) + return ERR_CAST(fc); + + ctx = fc->fs_private; + ctx->newns = true; + put_ipc_ns(ctx->ipc_ns); + ctx->ipc_ns = get_ipc_ns(ns); + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns); + + mnt = fc_mount_longterm(fc); + put_fs_context(fc); + return mnt; } static void init_once(void *foo) { - struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; + struct mqueue_inode_info *p = foo; inode_init_once(&p->vfs_inode); } @@ -354,30 +499,23 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; - ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); + ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } -static void mqueue_i_callback(struct rcu_head *head) +static void mqueue_free_inode(struct inode *inode) { - struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } -static void mqueue_destroy_inode(struct inode *inode) -{ - call_rcu(&inode->i_rcu, mqueue_i_callback); -} - static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; - struct user_struct *user; - unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; - struct msg_msg *msg; + struct msg_msg *msg, *nmsg; + LIST_HEAD(tmp_msg); clear_inode(inode); @@ -388,22 +526,28 @@ static void mqueue_evict_inode(struct inode *inode) info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) - free_msg(msg); + list_add_tail(&msg->m_list, &tmp_msg); kfree(info->node_cache); spin_unlock(&info->lock); - /* Total amount of bytes accounted for the mqueue */ - mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + - min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * - sizeof(struct posix_msg_tree_node); + list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { + list_del(&msg->m_list); + free_msg(msg); + } + + if (info->ucounts) { + unsigned long mq_bytes, mq_treesize; - mq_bytes = mq_treesize + (info->attr.mq_maxmsg * - info->attr.mq_msgsize); + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - user = 
info->user; - if (user) { spin_lock(&mq_lock); - user->mq_bytes -= mq_bytes; + dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes); /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns @@ -413,17 +557,18 @@ static void mqueue_evict_inode(struct inode *inode) if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); - free_uid(user); + put_ucounts(info->ucounts); + info->ucounts = NULL; } if (ipc_ns) put_ipc_ns(ipc_ns); } -static int mqueue_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) +static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg) { + struct inode *dir = dentry->d_parent->d_inode; struct inode *inode; - struct mq_attr *attr = dentry->d_fsdata; + struct mq_attr *attr = arg; int error; struct ipc_namespace *ipc_ns; @@ -433,9 +578,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry, error = -EACCES; goto out_unlock; } - if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || - (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && - !capable(CAP_SYS_RESOURCE))) { + + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && + !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } @@ -452,10 +597,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry, put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; - dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; + simple_inode_init_ts(dir); - d_instantiate(dentry, inode); - dget(dentry); + d_make_persistent(dentry, inode); return 0; out_unlock: spin_unlock(&mq_lock); @@ -464,15 +608,16 @@ out_unlock: return error; } -static int mqueue_unlink(struct inode *dir, struct dentry *dentry) +static int mqueue_create(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode, bool excl) { - struct inode *inode = dentry->d_inode; + return mqueue_create_attr(dentry, mode, NULL); +} - dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; +static int mqueue_unlink(struct inode *dir, struct dentry *dentry) +{ dir->i_size -= DIRENT_SIZE; - drop_nlink(inode); - dput(dentry); - return 0; + return simple_unlink(dir, dentry); } /* @@ -485,7 +630,8 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { - struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); + struct inode *inode = file_inode(filp); + struct mqueue_inode_info *info = MQUEUE_I(inode); char buffer[FILENT_SIZE]; ssize_t ret; @@ -506,7 +652,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, if (ret <= 0) return ret; - file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); return ret; } @@ -522,19 +668,19 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id) return 0; } -static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) +static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); - int retval = 0; + __poll_t retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) - retval = POLLIN | POLLRDNORM; + retval = EPOLLIN | EPOLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) - retval |= POLLOUT | POLLWRNORM; + retval |= EPOLLOUT | EPOLLWRNORM; spin_unlock(&info->lock); return retval; @@ -546,10 +692,8 @@ 
static void wq_add(struct mqueue_inode_info *info, int sr, { struct ext_wait_queue *walk; - ewp->task = current; - list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { - if (walk->task->static_prio <= current->static_prio) { + if (walk->task->prio <= current->prio) { list_add_tail(&ewp->list, &walk->list); return; } @@ -564,6 +708,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) + __releases(&info->lock) { int retval; signed long time; @@ -571,21 +716,23 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, wq_add(info, sr, ewp); for (;;) { - set_current_state(TASK_INTERRUPTIBLE); + /* memory barrier not required, we hold info->lock */ + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); - while (ewp->state == STATE_PENDING) - cpu_relax(); - - if (ewp->state == STATE_READY) { + if (READ_ONCE(ewp->state) == STATE_READY) { + /* see MQ_BARRIER for purpose/pairing */ + smp_acquire__after_ctrl_dep(); retval = 0; goto out; } spin_lock(&info->lock); - if (ewp->state == STATE_READY) { + + /* we hold info->lock, so no memory barrier required */ + if (READ_ONCE(ewp->state) == STATE_READY) { retval = 0; goto out_unlock; } @@ -622,7 +769,7 @@ static struct ext_wait_queue *wq_get_first_waiter( static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -637,27 +784,44 @@ static void __do_notify(struct mqueue_inode_info *info) * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { - struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; - case SIGEV_SIGNAL: - /* sends signal */ + case SIGEV_SIGNAL: { + struct kernel_siginfo sig_i; + struct task_struct *task; + + /* do_mq_notify() accepts sigev_signo == 0, why?? */ + if (!info->notify.sigev_signo) + break; + clear_siginfo(&sig_i); sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); + /* map current pid/uid into info->owner's namespaces */ sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); - sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, + current_uid()); + /* + * We can't use kill_pid_info(), this signal should + * bypass check_kill_permission(). It is from kernel + * but si_fromuser() can't know this. + * We do check the self_exec_id, to avoid sending + * signals to programs that don't expect them. 
+ */ + task = pid_task(info->notify_owner, PIDTYPE_TGID); + if (task && task->self_exec_id == + info->notify_self_exec_id) { + do_send_sig_info(info->notify.sigev_signo, + &sig_i, task, PIDTYPE_TGID); + } rcu_read_unlock(); - - kill_pid_info(info->notify.sigev_signo, - &sig_i, info->notify_owner); break; + } case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); @@ -672,15 +836,13 @@ static void __do_notify(struct mqueue_inode_info *info) wake_up(&info->wait_q); } -static int prepare_timeout(const struct timespec __user *u_abs_timeout, - ktime_t *expires, struct timespec *ts) +static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout, + struct timespec64 *ts) { - if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) + if (get_timespec64(ts, u_abs_timeout)) return -EFAULT; - if (!timespec_valid(ts)) + if (!timespec64_valid(ts)) return -EINVAL; - - *expires = timespec_to_ktime(*ts); return 0; } @@ -697,163 +859,84 @@ static void remove_notification(struct mqueue_inode_info *info) info->notify_user_ns = NULL; } -static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) +static int prepare_open(struct dentry *dentry, int oflag, int ro, + umode_t mode, struct filename *name, + struct mq_attr *attr) { - int mq_treesize; - unsigned long total_size; + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; + int acc; - if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) - return -EINVAL; - if (capable(CAP_SYS_RESOURCE)) { - if (attr->mq_maxmsg > HARD_MSGMAX || - attr->mq_msgsize > HARD_MSGSIZEMAX) - return -EINVAL; - } else { - if (attr->mq_maxmsg > ipc_ns->mq_msg_max || - attr->mq_msgsize > ipc_ns->mq_msgsize_max) - return -EINVAL; + if (d_really_is_negative(dentry)) { + if (!(oflag & O_CREAT)) + return -ENOENT; + if (ro) + return ro; + audit_inode_parent_hidden(name, dentry->d_parent); + return vfs_mkobj(dentry, mode & ~current_umask(), + mqueue_create_attr, attr); } - /* check for overflow */ - if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) - return -EOVERFLOW; - mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + - min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * - sizeof(struct posix_msg_tree_node); - total_size = attr->mq_maxmsg * attr->mq_msgsize; - if (total_size + mq_treesize < total_size) - return -EOVERFLOW; - return 0; + /* it already existed */ + audit_inode(name, dentry, 0); + if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) + return -EEXIST; + if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) + return -EINVAL; + acc = oflag2acc[oflag & O_ACCMODE]; + return inode_permission(&nop_mnt_idmap, d_inode(dentry), acc); } -/* - * Invoked when creating a new queue via sys_mq_open - */ -static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, - struct path *path, int oflag, umode_t mode, - struct mq_attr *attr) +static struct file *mqueue_file_open(struct filename *name, + struct vfsmount *mnt, int oflag, int ro, + umode_t mode, struct mq_attr *attr) { - const struct cred *cred = current_cred(); + struct dentry *dentry; + struct file *file; int ret; - if (attr) { - ret = mq_attr_ok(ipc_ns, attr); - if (ret) - return ERR_PTR(ret); - /* store for use during create */ - path->dentry->d_fsdata = attr; - } else { - struct mq_attr def_attr; - - def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, - ipc_ns->mq_msg_default); - def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, - ipc_ns->mq_msgsize_default); - ret = 
mq_attr_ok(ipc_ns, &def_attr); - if (ret) - return ERR_PTR(ret); + dentry = start_creating_noperm(mnt->mnt_root, &QSTR(name->name)); + if (IS_ERR(dentry)) + return ERR_CAST(dentry); + + ret = prepare_open(dentry, oflag, ro, mode, name, attr); + file = ERR_PTR(ret); + if (!ret) { + const struct path path = { .mnt = mnt, .dentry = dentry }; + file = dentry_open(&path, oflag, current_cred()); } - mode &= ~current_umask(); - ret = vfs_create(dir, path->dentry, mode, true); - path->dentry->d_fsdata = NULL; - if (ret) - return ERR_PTR(ret); - return dentry_open(path, oflag, cred); + end_creating(dentry); + return file; } -/* Opens existing queue */ -static struct file *do_open(struct path *path, int oflag) +static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, + struct mq_attr *attr) { - static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; - int acc; - if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) - return ERR_PTR(-EINVAL); - acc = oflag2acc[oflag & O_ACCMODE]; - if (inode_permission(path->dentry->d_inode, acc)) - return ERR_PTR(-EACCES); - return dentry_open(path, oflag, current_cred()); + struct filename *name __free(putname) = NULL;; + struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt; + int fd, ro; + + audit_mq_open(oflag, mode, attr); + + name = getname(u_name); + if (IS_ERR(name)) + return PTR_ERR(name); + + ro = mnt_want_write(mnt); /* we'll drop it in any case */ + fd = FD_ADD(O_CLOEXEC, mqueue_file_open(name, mnt, oflag, ro, mode, attr)); + if (!ro) + mnt_drop_write(mnt); + return fd; } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { - struct path path; - struct file *filp; - struct filename *name; struct mq_attr attr; - int fd, error; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - struct vfsmount *mnt = ipc_ns->mq_mnt; - struct dentry *root = mnt->mnt_root; - int ro; - if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; - audit_mq_open(oflag, mode, u_attr ? &attr : NULL); - - if (IS_ERR(name = getname(u_name))) - return PTR_ERR(name); - - fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) - goto out_putname; - - ro = mnt_want_write(mnt); /* we'll drop it in any case */ - error = 0; - mutex_lock(&root->d_inode->i_mutex); - path.dentry = lookup_one_len(name->name, root, strlen(name->name)); - if (IS_ERR(path.dentry)) { - error = PTR_ERR(path.dentry); - goto out_putfd; - } - path.mnt = mntget(mnt); - - if (oflag & O_CREAT) { - if (path.dentry->d_inode) { /* entry already exists */ - audit_inode(name, path.dentry, 0); - if (oflag & O_EXCL) { - error = -EEXIST; - goto out; - } - filp = do_open(&path, oflag); - } else { - if (ro) { - error = ro; - goto out; - } - audit_inode_parent_hidden(name, root); - filp = do_create(ipc_ns, root->d_inode, - &path, oflag, mode, - u_attr ? &attr : NULL); - } - } else { - if (!path.dentry->d_inode) { - error = -ENOENT; - goto out; - } - audit_inode(name, path.dentry, 0); - filp = do_open(&path, oflag); - } - - if (!IS_ERR(filp)) - fd_install(fd, filp); - else - error = PTR_ERR(filp); -out: - path_put(&path); -out_putfd: - if (error) { - put_unused_fd(fd); - fd = error; - } - mutex_unlock(&root->d_inode->i_mutex); - if (!ro) - mnt_drop_write(mnt); -out_putname: - putname(name); - return fd; + return do_mq_open(u_name, oflag, mode, u_attr ? 
&attr : NULL); } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) @@ -861,7 +944,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) int err; struct filename *name; struct dentry *dentry; - struct inode *inode = NULL; + struct inode *inode; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; @@ -873,27 +956,20 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) err = mnt_want_write(mnt); if (err) goto out_name; - mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(name->name, mnt->mnt_root, - strlen(name->name)); + dentry = start_removing_noperm(mnt->mnt_root, &QSTR(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); - goto out_unlock; + goto out_drop_write; } - inode = dentry->d_inode; - if (!inode) { - err = -ENOENT; - } else { - ihold(inode); - err = vfs_unlink(dentry->d_parent->d_inode, dentry); - } - dput(dentry); + inode = d_inode(dentry); + ihold(inode); + err = vfs_unlink(&nop_mnt_idmap, d_inode(mnt->mnt_root), + dentry, NULL); + end_removing(dentry); + iput(inode); -out_unlock: - mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); - if (inode) - iput(inode); +out_drop_write: mnt_drop_write(mnt); out_name: putname(name); @@ -907,33 +983,49 @@ out_name: * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the - * receiver. - * The receiver accepts the message and returns without grabbing the queue - * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers - * are necessary. The same algorithm is used for sysv semaphores, see - * ipc/sem.c for more details. + * receiver. The receiver accepts the message and returns without grabbing the + * queue spinlock: + * + * - Set pointer to message. + * - Queue the receiver task for later wakeup (without the info->lock). + * - Update its state to STATE_READY. Now the receiver can continue. + * - Wake up the process after the lock is dropped. Should the process wake up + * before this wakeup (due to a timeout or a signal) it will either see + * STATE_READY and continue or acquire the lock to check the state again. * * The same algorithm is used for senders. */ +static inline void __pipelined_op(struct wake_q_head *wake_q, + struct mqueue_inode_info *info, + struct ext_wait_queue *this) +{ + struct task_struct *task; + + list_del(&this->list); + task = get_task_struct(this->task); + + /* see MQ_BARRIER for purpose/pairing */ + smp_store_release(&this->state, STATE_READY); + wake_q_add_safe(wake_q, task); +} + /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). */ -static inline void pipelined_send(struct mqueue_inode_info *info, +static inline void pipelined_send(struct wake_q_head *wake_q, + struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; - list_del(&receiver->list); - receiver->state = STATE_PENDING; - wake_up_process(receiver->task); - smp_wmb(); - receiver->state = STATE_READY; + __pipelined_op(wake_q, info, receiver); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). 
*/ -static inline void pipelined_receive(struct mqueue_inode_info *info) +static inline void pipelined_receive(struct wake_q_head *wake_q, + struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); @@ -944,71 +1036,55 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) } if (msg_insert(sender->msg, info)) return; - list_del(&sender->list); - sender->state = STATE_PENDING; - wake_up_process(sender->task); - smp_wmb(); - sender->state = STATE_READY; + + __pipelined_op(wake_q, info, sender); } -SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, - size_t, msg_len, unsigned int, msg_prio, - const struct timespec __user *, u_abs_timeout) +static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, + size_t msg_len, unsigned int msg_prio, + struct timespec64 *ts) { - struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; - struct timespec ts; struct posix_msg_tree_node *new_leaf = NULL; int ret = 0; - - if (u_abs_timeout) { - int res = prepare_timeout(u_abs_timeout, &expires, &ts); - if (res) - return res; - timeout = &expires; - } + DEFINE_WAKE_Q(wake_q); if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; - audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); - - f = fdget(mqdes); - if (unlikely(!f.file)) { - ret = -EBADF; - goto out; + if (ts) { + expires = timespec64_to_ktime(*ts); + timeout = &expires; } - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; - } + audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts); + + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; + + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; info = MQUEUE_I(inode); - audit_inode(NULL, f.file->f_path.dentry, 0); + audit_file(fd_file(f)); - if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { - ret = -EBADF; - goto out_fput; - } + if (unlikely(!(fd_file(f)->f_mode & FMODE_WRITE))) + return -EBADF; - if (unlikely(msg_len > info->attr.mq_msgsize)) { - ret = -EMSGSIZE; - goto out_fput; - } + if (unlikely(msg_len > info->attr.mq_msgsize)) + return -EMSGSIZE; /* First try to allocate memory, before doing anything with * existing queues. 
*/ msg_ptr = load_msg(u_msg_ptr, msg_len); - if (IS_ERR(msg_ptr)) { - ret = PTR_ERR(msg_ptr); - goto out_fput; - } + if (IS_ERR(msg_ptr)) + return PTR_ERR(msg_ptr); msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; @@ -1026,19 +1102,20 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); new_leaf = NULL; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { - if (f.file->f_flags & O_NONBLOCK) { + if (fd_file(f)->f_flags & O_NONBLOCK) { ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; - wait.state = STATE_NONE; + + /* memory barrier not required, we hold info->lock */ + WRITE_ONCE(wait.state, STATE_NONE); ret = wq_sleep(info, SEND, timeout, &wait); /* * wq_sleep must be called with info->lock held, and @@ -1049,7 +1126,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { - pipelined_send(info, msg_ptr, receiver); + pipelined_send(&wake_q, info, msg_ptr, receiver); } else { /* adds message to the queue */ ret = msg_insert(msg_ptr, info); @@ -1057,67 +1134,52 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, goto out_unlock; __do_notify(info); } - inode->i_atime = inode->i_mtime = inode->i_ctime = - CURRENT_TIME; + simple_inode_init_ts(inode); } out_unlock: spin_unlock(&info->lock); + wake_up_q(&wake_q); out_free: if (ret) free_msg(msg_ptr); -out_fput: - fdput(f); -out: return ret; } -SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, - size_t, msg_len, unsigned int __user *, u_msg_prio, - const struct timespec __user *, u_abs_timeout) +static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, + size_t msg_len, unsigned int __user *u_msg_prio, + struct timespec64 *ts) { ssize_t ret; struct msg_msg *msg_ptr; - struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; - struct timespec ts; struct posix_msg_tree_node *new_leaf = NULL; - if (u_abs_timeout) { - int res = prepare_timeout(u_abs_timeout, &expires, &ts); - if (res) - return res; + if (ts) { + expires = timespec64_to_ktime(*ts); timeout = &expires; } - audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? 
&ts : NULL); + audit_mq_sendrecv(mqdes, msg_len, 0, ts); - f = fdget(mqdes); - if (unlikely(!f.file)) { - ret = -EBADF; - goto out; - } + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; - } + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; info = MQUEUE_I(inode); - audit_inode(NULL, f.file->f_path.dentry, 0); + audit_file(fd_file(f)); - if (unlikely(!(f.file->f_mode & FMODE_READ))) { - ret = -EBADF; - goto out_fput; - } + if (unlikely(!(fd_file(f)->f_mode & FMODE_READ))) + return -EBADF; /* checks if buffer is big enough */ - if (unlikely(msg_len < info->attr.mq_msgsize)) { - ret = -EMSGSIZE; - goto out_fput; - } + if (unlikely(msg_len < info->attr.mq_msgsize)) + return -EMSGSIZE; /* * msg_insert really wants us to have a valid, spare node struct so @@ -1133,30 +1195,33 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == 0) { - if (f.file->f_flags & O_NONBLOCK) { + if (fd_file(f)->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; - wait.state = STATE_NONE; + + /* memory barrier not required, we hold info->lock */ + WRITE_ONCE(wait.state, STATE_NONE); ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { + DEFINE_WAKE_Q(wake_q); + msg_ptr = msg_get(info); - inode->i_atime = inode->i_mtime = inode->i_ctime = - CURRENT_TIME; + simple_inode_init_ts(inode); /* There is now free space in queue. */ - pipelined_receive(info); + pipelined_receive(&wake_q, info); spin_unlock(&info->lock); + wake_up_q(&wake_q); ret = 0; } if (ret == 0) { @@ -1168,116 +1233,122 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, } free_msg(msg_ptr); } -out_fput: - fdput(f); -out: return ret; } +SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct __kernel_timespec __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); +} + +SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct __kernel_timespec __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); +} + /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. 
*/ -SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, - const struct sigevent __user *, u_notification) +static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; - struct fd f; struct sock *sock; struct inode *inode; - struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; - if (u_notification) { - if (copy_from_user(¬ification, u_notification, - sizeof(struct sigevent))) - return -EFAULT; - } - - audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); + audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; - if (u_notification != NULL) { - if (unlikely(notification.sigev_notify != SIGEV_NONE && - notification.sigev_notify != SIGEV_SIGNAL && - notification.sigev_notify != SIGEV_THREAD)) + if (notification != NULL) { + if (unlikely(notification->sigev_notify != SIGEV_NONE && + notification->sigev_notify != SIGEV_SIGNAL && + notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; - if (notification.sigev_notify == SIGEV_SIGNAL && - !valid_signal(notification.sigev_signo)) { + if (notification->sigev_notify == SIGEV_SIGNAL && + !valid_signal(notification->sigev_signo)) { return -EINVAL; } - if (notification.sigev_notify == SIGEV_THREAD) { + if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); - if (!nc) { - ret = -ENOMEM; - goto out; - } + if (!nc) + return -ENOMEM; + if (copy_from_user(nc->data, - notification.sigev_value.sival_ptr, + notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { - ret = -EFAULT; - goto out; + kfree_skb(nc); + return -EFAULT; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: - f = fdget(notification.sigev_signo); - if (!f.file) { - ret = -EBADF; - goto out; - } - sock = netlink_getsockbyfilp(f.file); - fdput(f); + sock = netlink_getsockbyfd(notification->sigev_signo); if (IS_ERR(sock)) { - ret = PTR_ERR(sock); - sock = NULL; - goto out; + kfree_skb(nc); + return PTR_ERR(sock); } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; - if (ret) { - sock = NULL; - nc = NULL; - goto out; - } + if (ret) + return ret; } } - f = fdget(mqdes); - if (!f.file) { + CLASS(fd, f)(mqdes); + if (fd_empty(f)) { ret = -EBADF; goto out; } - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { + inode = file_inode(fd_file(f)); + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) { ret = -EBADF; - goto out_fput; + goto out; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); - if (u_notification == NULL) { + if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); - inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, + inode_set_ctime_current(inode)); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { - switch (notification.sigev_notify) { + switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; @@ -1289,87 +1360,237 @@ retry: info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: - info->notify.sigev_signo = notification.sigev_signo; - info->notify.sigev_value = notification.sigev_value; + info->notify.sigev_signo = notification->sigev_signo; + info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; + info->notify_self_exec_id = current->self_exec_id; break; } info->notify_owner = 
get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); - inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); } spin_unlock(&info->lock); -out_fput: - fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { - dev_kfree_skb(nc); - } return ret; } +SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct sigevent __user *, u_notification) +{ + struct sigevent n, *p = NULL; + if (u_notification) { + if (copy_from_user(&n, u_notification, sizeof(struct sigevent))) + return -EFAULT; + p = &n; + } + return do_mq_notify(mqdes, p); +} + +static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old) +{ + struct inode *inode; + struct mqueue_inode_info *info; + + if (new && (new->mq_flags & (~O_NONBLOCK))) + return -EINVAL; + + CLASS(fd, f)(mqdes); + if (fd_empty(f)) + return -EBADF; + + if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) + return -EBADF; + + inode = file_inode(fd_file(f)); + info = MQUEUE_I(inode); + + spin_lock(&info->lock); + + if (old) { + *old = info->attr; + old->mq_flags = fd_file(f)->f_flags & O_NONBLOCK; + } + if (new) { + audit_mq_getsetattr(mqdes, new); + spin_lock(&fd_file(f)->f_lock); + if (new->mq_flags & O_NONBLOCK) + fd_file(f)->f_flags |= O_NONBLOCK; + else + fd_file(f)->f_flags &= ~O_NONBLOCK; + spin_unlock(&fd_file(f)->f_lock); + + inode_set_atime_to_ts(inode, inode_set_ctime_current(inode)); + } + + spin_unlock(&info->lock); + return 0; +} + SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct mq_attr __user *, u_mqstat, struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; - struct fd f; - struct inode *inode; - struct mqueue_inode_info *info; + struct mq_attr *new = NULL, *old = NULL; - if (u_mqstat != NULL) { - if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr))) + if (u_mqstat) { + new = &mqstat; + if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr))) return -EFAULT; - if (mqstat.mq_flags & (~O_NONBLOCK)) - return -EINVAL; } + if (u_omqstat) + old = &omqstat; - f = fdget(mqdes); - if (!f.file) { - ret = -EBADF; - goto out; + ret = do_mq_getsetattr(mqdes, new, old); + if (ret || !old) + return ret; + + if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr))) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_COMPAT + +struct compat_mq_attr { + compat_long_t mq_flags; /* message queue flags */ + compat_long_t mq_maxmsg; /* maximum number of messages */ + compat_long_t mq_msgsize; /* maximum message size */ + compat_long_t mq_curmsgs; /* number of messages currently queued */ + compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ +}; + +static inline int get_compat_mq_attr(struct mq_attr *attr, + const struct compat_mq_attr __user *uattr) +{ + struct compat_mq_attr v; + + if (copy_from_user(&v, uattr, sizeof(*uattr))) + return -EFAULT; + + memset(attr, 0, sizeof(*attr)); + attr->mq_flags = v.mq_flags; + attr->mq_maxmsg = v.mq_maxmsg; + attr->mq_msgsize = v.mq_msgsize; + attr->mq_curmsgs = v.mq_curmsgs; + return 0; +} + +static inline int put_compat_mq_attr(const struct mq_attr *attr, + struct compat_mq_attr __user *uattr) +{ + struct compat_mq_attr v; + + memset(&v, 0, sizeof(v)); + v.mq_flags = attr->mq_flags; + v.mq_maxmsg = attr->mq_maxmsg; + v.mq_msgsize = attr->mq_msgsize; + v.mq_curmsgs = attr->mq_curmsgs; + if (copy_to_user(uattr, &v, sizeof(*uattr))) + return -EFAULT; + return 0; +} + +COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, + 
int, oflag, compat_mode_t, mode, + struct compat_mq_attr __user *, u_attr) +{ + struct mq_attr attr, *p = NULL; + if (u_attr && oflag & O_CREAT) { + p = &attr; + if (get_compat_mq_attr(&attr, u_attr)) + return -EFAULT; } + return do_mq_open(u_name, oflag, mode, p); +} - inode = file_inode(f.file); - if (unlikely(f.file->f_op != &mqueue_file_operations)) { - ret = -EBADF; - goto out_fput; +COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct compat_sigevent __user *, u_notification) +{ + struct sigevent n, *p = NULL; + if (u_notification) { + if (get_compat_sigevent(&n, u_notification)) + return -EFAULT; + if (n.sigev_notify == SIGEV_THREAD) + n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); + p = &n; } - info = MQUEUE_I(inode); + return do_mq_notify(mqdes, p); +} - spin_lock(&info->lock); +COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct compat_mq_attr __user *, u_mqstat, + struct compat_mq_attr __user *, u_omqstat) +{ + int ret; + struct mq_attr mqstat, omqstat; + struct mq_attr *new = NULL, *old = NULL; - omqstat = info->attr; - omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; if (u_mqstat) { - audit_mq_getsetattr(mqdes, &mqstat); - spin_lock(&f.file->f_lock); - if (mqstat.mq_flags & O_NONBLOCK) - f.file->f_flags |= O_NONBLOCK; - else - f.file->f_flags &= ~O_NONBLOCK; - spin_unlock(&f.file->f_lock); - - inode->i_atime = inode->i_ctime = CURRENT_TIME; + new = &mqstat; + if (get_compat_mq_attr(new, u_mqstat)) + return -EFAULT; } + if (u_omqstat) + old = &omqstat; - spin_unlock(&info->lock); + ret = do_mq_getsetattr(mqdes, new, old); + if (ret || !old) + return ret; - ret = 0; - if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat, - sizeof(struct mq_attr))) - ret = -EFAULT; + if (put_compat_mq_attr(old, u_omqstat)) + return -EFAULT; + return 0; +} +#endif -out_fput: - fdput(f); -out: - return ret; +#ifdef CONFIG_COMPAT_32BIT_TIME +static int compat_prepare_timeout(const struct old_timespec32 __user *p, + struct timespec64 *ts) +{ + if (get_old_timespec32(ts, p)) + return -EFAULT; + if (!timespec64_valid(ts)) + return -EINVAL; + return 0; } +SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes, + const char __user *, u_msg_ptr, + unsigned int, msg_len, unsigned int, msg_prio, + const struct old_timespec32 __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = compat_prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); +} + +SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes, + char __user *, u_msg_ptr, + unsigned int, msg_len, unsigned int __user *, u_msg_prio, + const struct old_timespec32 __user *, u_abs_timeout) +{ + struct timespec64 ts, *p = NULL; + if (u_abs_timeout) { + int res = compat_prepare_timeout(u_abs_timeout, &ts); + if (res) + return res; + p = &ts; + } + return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); +} +#endif + static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, @@ -1385,20 +1606,27 @@ static const struct file_operations mqueue_file_operations = { static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, - .destroy_inode = mqueue_destroy_inode, + .free_inode = mqueue_free_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; +static const struct fs_context_operations mqueue_fs_context_ops = { + .free = mqueue_fs_context_free, + .get_tree = mqueue_get_tree, 
+}; + static struct file_system_type mqueue_fs_type = { - .name = "mqueue", - .mount = mqueue_mount, - .kill_sb = kill_litter_super, - .fs_flags = FS_USERNS_MOUNT, + .name = "mqueue", + .init_fs_context = mqueue_init_fs_context, + .kill_sb = kill_anon_super, + .fs_flags = FS_USERNS_MOUNT, }; int mq_init_ns(struct ipc_namespace *ns) { + struct vfsmount *m; + ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; @@ -1406,12 +1634,10 @@ int mq_init_ns(struct ipc_namespace *ns) ns->mq_msg_default = DFLT_MSG; ns->mq_msgsize_default = DFLT_MSGSIZE; - ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); - if (IS_ERR(ns->mq_mnt)) { - int err = PTR_ERR(ns->mq_mnt); - ns->mq_mnt = NULL; - return err; - } + m = mq_create_mount(ns); + if (IS_ERR(m)) + return PTR_ERR(m); + ns->mq_mnt = m; return 0; } @@ -1420,23 +1646,21 @@ void mq_clear_sbinfo(struct ipc_namespace *ns) ns->mq_mnt->mnt_sb->s_fs_info = NULL; } -void mq_put_mnt(struct ipc_namespace *ns) -{ - kern_unmount(ns->mq_mnt); -} - static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, - SLAB_HWCACHE_ALIGN, init_once); + SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; - /* ignore failures - they are not fatal */ - mq_sysctl_table = mq_register_sysctl_table(); + if (!setup_mq_sysctls(&init_ipc_ns)) { + pr_warn("sysctl registration failed\n"); + error = -ENOMEM; + goto out_kmem; + } error = register_filesystem(&mqueue_fs_type); if (error) @@ -1453,10 +1677,10 @@ static int __init init_mqueue_fs(void) out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: - if (mq_sysctl_table) - unregister_sysctl_table(mq_sysctl_table); + retire_mq_sysctls(&init_ipc_ns); +out_kmem: kmem_cache_destroy(mqueue_inode_cachep); return error; } -__initcall(init_mqueue_fs); +device_initcall(init_mqueue_fs);
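Notes on the mechanisms above, with userspace sketches. All queue names, sizes, and priorities in the sketches are illustrative; each builds with gcc -std=c11 -Wall (the message-queue ones may additionally need -lrt on older glibc; the first needs -pthread).

First, the MQ_BARRIER comment: the waker publishes the message pointer with a plain store, then sets the wait state to STATE_READY with smp_store_release(), and the woken task orders its read of the message after its read of the state. Below is a minimal C11-atomics analogue of that pairing only — a plain acquire load stands in for the kernel's READ_ONCE() plus smp_acquire__after_ctrl_dep(), and the wake_q wakeup machinery is left out entirely:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STATE_NONE  0
#define STATE_READY 1

struct wait_slot {
	void *msg;		/* published by the release store below */
	_Atomic int state;	/* STATE_NONE -> STATE_READY */
};

static struct wait_slot slot = { .msg = NULL, .state = STATE_NONE };

static void *waker(void *arg)
{
	static char message[] = "hello";

	(void)arg;
	slot.msg = message;	/* plain store, ordered by the release below */
	/* pairs with the acquire load in main(), like smp_store_release() */
	atomic_store_explicit(&slot.state, STATE_READY, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	/* once the acquire load observes STATE_READY, reading msg is safe */
	while (atomic_load_explicit(&slot.state, memory_order_acquire) != STATE_READY)
		;
	printf("received: %s\n", (char *)slot.msg);
	pthread_join(t, NULL);
	return 0;
}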
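Second, the msg_tree_rightmost cache in msg_insert()/msg_get(): an insert that never branches left becomes the new rightmost (highest-priority) node, and an erase of the rightmost moves the cache to its predecessor with rb_prev(), so receive no longer walks the tree. A sketch of the insert-side invariant on a plain unbalanced BST — not the kernel rbtree, and the erase-side fixup is only described in a comment:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int prio;
	struct node *left, *right;
};

static struct node *root;
static struct node *rightmost;	/* cached highest-priority node */

static void insert(int prio)
{
	struct node **p = &root;
	struct node *n = calloc(1, sizeof(*n));
	int is_rightmost = 1;	/* stays true if we never branch left */

	if (!n)
		exit(1);
	n->prio = prio;
	while (*p) {
		if (prio < (*p)->prio) {
			p = &(*p)->left;
			is_rightmost = 0;	/* went left: not the new max */
		} else {
			p = &(*p)->right;
		}
	}
	*p = n;
	if (is_rightmost)
		rightmost = n;	/* O(1) cache update, no full right walk */
	/* on erase of the rightmost, the kernel moves the cache to the
	 * predecessor via rb_prev(); omitted in this sketch */
}

int main(void)
{
	insert(3);
	insert(7);
	insert(1);
	printf("highest priority: %d\n", rightmost->prio);	/* 7 */
	return 0;	/* demo only: nodes intentionally not freed */
}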
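Third, mqueue_get_inode() bounds the accounting arithmetic before charging UCOUNT_RLIMIT_MSGQUEUE: the mq_maxmsg * mq_msgsize product is rejected if it would overflow, and so is the later addition of the tree overhead. The same two checks in portable C, mirroring the diff's ULONG_MAX division test and the self-comparison after addition:

#include <limits.h>
#include <stdio.h>

/* mirrors: if (attr.mq_msgsize > ULONG_MAX / attr.mq_maxmsg) -> -EOVERFLOW */
static int mul_overflows(unsigned long maxmsg, unsigned long msgsize)
{
	return maxmsg != 0 && msgsize > ULONG_MAX / maxmsg;
}

/* mirrors: if (mq_bytes + mq_treesize < mq_bytes) -> -EOVERFLOW */
static int add_overflows(unsigned long bytes, unsigned long treesize)
{
	return bytes + treesize < bytes;
}

int main(void)
{
	printf("%d\n", mul_overflows(1UL << 36, 1UL << 36));	/* 1 on 64-bit */
	printf("%d\n", mul_overflows(10, 8192));		/* 0 */
	printf("%d\n", add_overflows(ULONG_MAX - 4, 8));	/* 1 */
	return 0;
}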
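Fourth, the priority ordering the message tree exists for: sends are keyed by priority and receives always take the highest. The behaviour is observable from user space with the syscalls this file implements (queue name and sizes arbitrary):

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
	char buf[64];	/* must be >= mq_msgsize for mq_receive() */
	unsigned int prio;
	mqd_t q;

	q = mq_open("/prio_demo", O_CREAT | O_RDWR, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	mq_send(q, "low", 4, 1);
	mq_send(q, "high", 5, 7);
	mq_send(q, "mid", 4, 3);

	/* drains highest priority first: high (7), mid (3), low (1) */
	for (int i = 0; i < 3; i++)
		if (mq_receive(q, buf, sizeof(buf), &prio) >= 0)
			printf("prio %u: %s\n", prio, buf);

	mq_close(q);
	mq_unlink("/prio_demo");
	return 0;
}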
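Fifth, timeouts: prepare_timeout() validates the user-supplied timespec, and wq_sleep() passes it to schedule_hrtimeout_range_clock() with HRTIMER_MODE_ABS on CLOCK_REALTIME — the deadline is absolute, not an interval. Hence the usual userspace pattern of reading the clock and adding the desired wait:

#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 32 };
	char buf[32];
	struct timespec ts;
	mqd_t q;

	q = mq_open("/timeo_demo", O_CREAT | O_RDONLY, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	/* the deadline is an absolute CLOCK_REALTIME time, not a duration */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;

	if (mq_timedreceive(q, buf, sizeof(buf), NULL, &ts) < 0 &&
	    errno == ETIMEDOUT)
		printf("empty queue: timed out at the 1s deadline\n");

	mq_close(q);
	mq_unlink("/timeo_demo");
	return 0;
}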
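Sixth, notification: __do_notify() fires only when mq_curmsgs becomes 1, i.e. on the empty-to-non-empty transition, and the registration is one-shot under POSIX semantics. A race-free SIGEV_SIGNAL demo that blocks the signal and collects it with sigwait() (SIGUSR1 is an arbitrary choice):

#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 32 };
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};
	sigset_t set;
	int sig;
	mqd_t q;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* collect it via sigwait() */

	q = mq_open("/notify_demo", O_CREAT | O_RDWR, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	if (mq_notify(q, &sev) < 0) {	/* register for the 0 -> 1 transition */
		perror("mq_notify");
		return 1;
	}

	mq_send(q, "x", 2, 0);		/* queue was empty: SIGUSR1 is sent */
	sigwait(&set, &sig);
	printf("got signal %d; the registration is now cleared\n", sig);

	mq_close(q);
	mq_unlink("/notify_demo");
	return 0;
}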
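Finally, do_mq_getsetattr(): only O_NONBLOCK may be set in mq_flags (any other bit is rejected with -EINVAL), the sizing fields are ignored on set, and the old attributes are reported back. For example:

#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 32 };
	struct mq_attr old, set = { .mq_flags = O_NONBLOCK };
	char buf[32];
	mqd_t q;

	q = mq_open("/attr_demo", O_CREAT | O_RDWR, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	/* only O_NONBLOCK in mq_flags is honoured; sizes can't change here */
	if (mq_setattr(q, &set, &old) == 0)
		printf("old flags %#lx, curmsgs %ld\n",
		       old.mq_flags, old.mq_curmsgs);

	/* empty queue + O_NONBLOCK: the receive fails instead of sleeping */
	if (mq_receive(q, buf, sizeof(buf), NULL) < 0 && errno == EAGAIN)
		printf("EAGAIN, as expected\n");

	mq_close(q);
	mq_unlink("/attr_demo");
	return 0;
}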
