Diffstat (limited to 'kernel/signal.c')
-rw-r--r--   kernel/signal.c   2128
1 file changed, 1447 insertions, 681 deletions
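Before the hunks below: this diff reworks several caller-facing helpers — force_sig(), force_sig_fault() and related functions now implicitly target current instead of taking a task_struct argument, dequeue_signal() drops its task parameter, send_signal() becomes send_signal_locked(), and kill_pid_info_as_cred() is replaced by kill_pid_usb_asyncio(). A minimal sketch of the new force_sig_fault() calling convention follows; the handler name and the fault-address plumbing are illustrative assumptions, not code from the patch.

/* Illustrative only: shows the three-argument force_sig_fault() introduced
 * by this diff, which always signals the current task. The handler name and
 * header choices are assumptions for the sketch, not part of the patch. */
#include <linux/sched/signal.h>	/* force_sig_fault() declaration */
#include <linux/signal.h>

static void example_do_page_fault(unsigned long address, bool user_mode_fault)
{
	if (!user_mode_fault)
		return;		/* kernel-mode faults are handled elsewhere */

	/* Old API: force_sig_fault(sig, code, addr, ..., current);
	 * new API: the target task argument is gone, current is implied. */
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
}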
diff --git a/kernel/signal.c b/kernel/signal.c index e1d7ad8e6ab1..e42b8bd6922f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/signal.c * @@ -19,7 +20,10 @@ #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/sched/cputime.h> +#include <linux/file.h> #include <linux/fs.h> +#include <linux/mm.h> +#include <linux/proc_fs.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/coredump.h> @@ -29,7 +33,7 @@ #include <linux/signal.h> #include <linux/signalfd.h> #include <linux/ratelimit.h> -#include <linux/tracehook.h> +#include <linux/task_work.h> #include <linux/capability.h> #include <linux/freezer.h> #include <linux/pid_namespace.h> @@ -40,7 +44,10 @@ #include <linux/cn_proc.h> #include <linux/compiler.h> #include <linux/posix-timers.h> -#include <linux/livepatch.h> +#include <linux/cgroup.h> +#include <linux/audit.h> +#include <linux/sysctl.h> +#include <uapi/linux/pidfd.h> #define CREATE_TRACE_POINTS #include <trace/events/signal.h> @@ -50,7 +57,9 @@ #include <asm/unistd.h> #include <asm/siginfo.h> #include <asm/cacheflush.h> -#include "audit.h" /* audit_signal_info() */ +#include <asm/syscall.h> /* for syscall_get_* */ + +#include "time/posix-timers.h" /* * SLAB caches for signal bits. @@ -86,6 +95,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force) handler == SIG_DFL && !(force && sig_kernel_only(sig))) return true; + /* Only allow kernel generated signals to this kthread */ + if (unlikely((t->flags & PF_KTHREAD) && + (handler == SIG_KTHREAD_KERNEL) && !force)) + return true; + return sig_handler_ignored(handler, sig); } @@ -144,9 +158,10 @@ static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked) static bool recalc_sigpending_tsk(struct task_struct *t) { - if ((t->jobctl & JOBCTL_PENDING_MASK) || + if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) || PENDING(&t->pending, &t->blocked) || - PENDING(&t->signal->shared_pending, &t->blocked)) { + PENDING(&t->signal->shared_pending, &t->blocked) || + cgroup_task_frozen(t)) { set_tsk_thread_flag(t, TIF_SIGPENDING); return true; } @@ -159,22 +174,12 @@ static bool recalc_sigpending_tsk(struct task_struct *t) return false; } -/* - * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. - * This is superfluous when called on current, the wakeup is a harmless no-op. - */ -void recalc_sigpending_and_wake(struct task_struct *t) -{ - if (recalc_sigpending_tsk(t)) - signal_wake_up(t, 0); -} - void recalc_sigpending(void) { - if (!recalc_sigpending_tsk(current) && !freezing(current) && - !klp_patch_pending(current)) - clear_thread_flag(TIF_SIGPENDING); - + if (!recalc_sigpending_tsk(current) && !freezing(current)) { + if (unlikely(test_thread_flag(TIF_SIGPENDING))) + clear_thread_flag(TIF_SIGPENDING); + } } EXPORT_SYMBOL(recalc_sigpending); @@ -344,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. * Group stop states are cleared and the group stop count is consumed if * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group - * stop, the appropriate %SIGNAL_* flags are set. + * stop, the appropriate `SIGNAL_*` flags are set. * * CONTEXT: * Must be called with @task->sighand->siglock held. 
@@ -381,64 +386,92 @@ static bool task_participate_group_stop(struct task_struct *task) void task_join_group_stop(struct task_struct *task) { + unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; + struct signal_struct *sig = current->signal; + + if (sig->group_stop_count) { + sig->group_stop_count++; + mask |= JOBCTL_STOP_CONSUME; + } else if (!(sig->flags & SIGNAL_STOP_STOPPED)) + return; + /* Have the new thread join an on-going signal group stop */ - unsigned long jobctl = current->jobctl; - if (jobctl & JOBCTL_STOP_PENDING) { - struct signal_struct *sig = current->signal; - unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK; - unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; - if (task_set_jobctl_pending(task, signr | gstop)) { - sig->group_stop_count++; - } - } + task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); } -/* - * allocate a new signal queue record - * - this may be called without locks if and only if t == current, otherwise an - * appropriate lock must be held to stop the target task from exiting - */ -static struct sigqueue * -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) +static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig, + int override_rlimit) { - struct sigqueue *q = NULL; - struct user_struct *user; + struct ucounts *ucounts; + long sigpending; /* * Protect access to @t credentials. This can go away when all * callers hold rcu read lock. + * + * NOTE! A pending signal will hold on to the user refcount, + * and we get/put the refcount only when the sigpending count + * changes from/to zero. */ rcu_read_lock(); - user = get_uid(__task_cred(t)->user); - atomic_inc(&user->sigpending); + ucounts = task_ucounts(t); + sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, + override_rlimit); rcu_read_unlock(); + if (!sigpending) + return NULL; - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { - q = kmem_cache_alloc(sigqueue_cachep, flags); - } else { + if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) { + dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING); print_dropped_signal(sig); + return NULL; } - if (unlikely(q == NULL)) { - atomic_dec(&user->sigpending); - free_uid(user); - } else { - INIT_LIST_HEAD(&q->list); - q->flags = 0; - q->user = user; + return ucounts; +} + +static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts, + const unsigned int sigqueue_flags) +{ + INIT_LIST_HEAD(&q->list); + q->flags = sigqueue_flags; + q->ucounts = ucounts; +} + +/* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ +static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, + int override_rlimit) +{ + struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit); + struct sigqueue *q; + + if (!ucounts) + return NULL; + + q = kmem_cache_alloc(sigqueue_cachep, gfp_flags); + if (!q) { + dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING); + return NULL; } + __sigqueue_init(q, ucounts, 0); return q; } static void __sigqueue_free(struct sigqueue *q) { - if (q->flags & SIGQUEUE_PREALLOC) + if (q->flags & SIGQUEUE_PREALLOC) { + posixtimer_sigqueue_putref(q); return; - atomic_dec(&q->user->sigpending); - free_uid(q->user); + } + if (q->ucounts) { + dec_rlimit_put_ucounts(q->ucounts, 
UCOUNT_RLIMIT_SIGPENDING); + q->ucounts = NULL; + } kmem_cache_free(sigqueue_cachep, q); } @@ -469,42 +502,6 @@ void flush_signals(struct task_struct *t) } EXPORT_SYMBOL(flush_signals); -#ifdef CONFIG_POSIX_TIMERS -static void __flush_itimer_signals(struct sigpending *pending) -{ - sigset_t signal, retain; - struct sigqueue *q, *n; - - signal = pending->signal; - sigemptyset(&retain); - - list_for_each_entry_safe(q, n, &pending->list, list) { - int sig = q->info.si_signo; - - if (likely(q->info.si_code != SI_TIMER)) { - sigaddset(&retain, sig); - } else { - sigdelset(&signal, sig); - list_del_init(&q->list); - __sigqueue_free(q); - } - } - - sigorsets(&pending->signal, &signal, &retain); -} - -void flush_itimer_signals(void) -{ - struct task_struct *tsk = current; - unsigned long flags; - - spin_lock_irqsave(&tsk->sighand->siglock, flags); - __flush_itimer_signals(&tsk->pending); - __flush_itimer_signals(&tsk->signal->shared_pending); - spin_unlock_irqrestore(&tsk->sighand->siglock, flags); -} -#endif - void ignore_signals(struct task_struct *t) { int i; @@ -545,12 +542,16 @@ bool unhandled_signal(struct task_struct *tsk, int sig) if (handler != SIG_IGN && handler != SIG_DFL) return false; + /* If dying, we handle all new signals by ignoring them */ + if (fatal_signal_pending(tsk)) + return false; + /* if ptraced, let the tracer determine */ return !tsk->ptrace; } static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info, - bool *resched_timer) + struct sigqueue **timer_sigq) { struct sigqueue *q, *first = NULL; @@ -573,12 +574,17 @@ still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); - *resched_timer = - (first->flags & SIGQUEUE_PREALLOC) && - (info->si_code == SI_TIMER) && - (info->si_sys_private); - - __sigqueue_free(first); + /* + * posix-timer signals are preallocated and freed when the last + * reference count is dropped in posixtimer_deliver_signal() or + * immediately on timer deletion when the signal is not pending. + * Spare the extra round through __sigqueue_free() which is + * ignoring preallocated signals. + */ + if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER))) + *timer_sigq = first; + else + __sigqueue_free(first); } else { /* * Ok, it wasn't in the queue. This must be @@ -595,58 +601,39 @@ still_pending: } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - kernel_siginfo_t *info, bool *resched_timer) + kernel_siginfo_t *info, struct sigqueue **timer_sigq) { int sig = next_signal(pending, mask); if (sig) - collect_signal(sig, pending, info, resched_timer); + collect_signal(sig, pending, info, timer_sigq); return sig; } /* - * Dequeue a signal and return the element to the caller, which is - * expected to free it. - * - * All callers have to hold the siglock. + * Try to dequeue a signal. If a deliverable signal is found fill in the + * caller provided siginfo and return the signal number. Otherwise return + * 0. 
*/ -int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) +int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type) { - bool resched_timer = false; + struct task_struct *tsk = current; + struct sigqueue *timer_sigq; int signr; - /* We only dequeue private signals from ourselves, we don't let - * signalfd steal them - */ - signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); + lockdep_assert_held(&tsk->sighand->siglock); + +again: + *type = PIDTYPE_PID; + timer_sigq = NULL; + signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq); if (!signr) { + *type = PIDTYPE_TGID; signr = __dequeue_signal(&tsk->signal->shared_pending, - mask, info, &resched_timer); -#ifdef CONFIG_POSIX_TIMERS - /* - * itimer signal ? - * - * itimers are process shared and we restart periodic - * itimers in the signal delivery path to prevent DoS - * attacks in the high resolution timer case. This is - * compliant with the old way of self-restarting - * itimers, as the SIGALRM is a legacy signal and only - * queued once. Changing the restart behaviour to - * restart the timer in the signal dequeue path is - * reducing the timer noise on heavy loaded !highres - * systems too. - */ - if (unlikely(signr == SIGALRM)) { - struct hrtimer *tmr = &tsk->signal->real_timer; - - if (!hrtimer_is_queued(tmr) && - tsk->signal->it_real_incr != 0) { - hrtimer_forward(tmr, tmr->base->get_time(), - tsk->signal->it_real_incr); - hrtimer_restart(tmr); - } - } -#endif + mask, info, &timer_sigq); + + if (unlikely(signr == SIGALRM)) + posixtimer_rearm_itimer(tsk); } recalc_sigpending(); @@ -668,26 +655,58 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in */ current->jobctl |= JOBCTL_STOP_DEQUEUED; } -#ifdef CONFIG_POSIX_TIMERS - if (resched_timer) { - /* - * Release the siglock to ensure proper locking order - * of timer locks outside of siglocks. Note, we leave - * irqs disabled here, since the posix-timers code is - * about to disable them again anyway. - */ - spin_unlock(&tsk->sighand->siglock); - posixtimer_rearm(info); - spin_lock(&tsk->sighand->siglock); - /* Don't expose the si_sys_private value to userspace */ - info->si_sys_private = 0; + if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) { + if (!posixtimer_deliver_signal(info, timer_sigq)) + goto again; } -#endif + return signr; } EXPORT_SYMBOL_GPL(dequeue_signal); +static int dequeue_synchronous_signal(kernel_siginfo_t *info) +{ + struct task_struct *tsk = current; + struct sigpending *pending = &tsk->pending; + struct sigqueue *q, *sync = NULL; + + /* + * Might a synchronous signal be in the queue? + */ + if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) + return 0; + + /* + * Return the first synchronous signal in the queue. + */ + list_for_each_entry(q, &pending->list, list) { + /* Synchronous signals have a positive si_code */ + if ((q->info.si_code > SI_USER) && + (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { + sync = q; + goto next; + } + } + return 0; +next: + /* + * Check if there is another siginfo for the same signal. + */ + list_for_each_entry_continue(q, &pending->list, list) { + if (q->info.si_signo == sync->info.si_signo) + goto still_pending; + } + + sigdelset(&pending->signal, sync->info.si_signo); + recalc_sigpending(); +still_pending: + list_del_init(&sync->list); + copy_siginfo(info, &sync->info); + __sigqueue_free(sync); + return info->si_signo; +} + /* * Tell a process that it has a new active signal.. 
* @@ -701,7 +720,10 @@ EXPORT_SYMBOL_GPL(dequeue_signal); */ void signal_wake_up_state(struct task_struct *t, unsigned int state) { + lockdep_assert_held(&t->sighand->siglock); + set_tsk_thread_flag(t, TIF_SIGPENDING); + /* * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it @@ -713,17 +735,24 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state) kick_process(t); } -/* - * Remove signals in mask from the pending set and queue. - * Returns 1 if any signals were found. - * - * All callers must be holding the siglock. - */ -static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) +static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q); + +static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q) +{ + if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER)) + __sigqueue_free(q); + else + posixtimer_sig_ignore(tsk, q); +} + +/* Remove signals in mask from the pending set and queue. */ +static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s) { struct sigqueue *q, *n; sigset_t m; + lockdep_assert_held(&p->sighand->siglock); + sigandsets(&m, mask, &s->signal); if (sigisemptyset(&m)) return; @@ -732,7 +761,7 @@ static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) list_for_each_entry_safe(q, n, &s->list, list) { if (sigismember(mask, q->info.si_signo)) { list_del_init(&q->list); - __sigqueue_free(q); + sigqueue_free_ignored(p, q); } } } @@ -794,6 +823,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info, */ if (!sid || sid == task_session(current)) break; + fallthrough; default: return -EPERM; } @@ -822,7 +852,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info, static void ptrace_trap_notify(struct task_struct *t) { WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); - assert_spin_locked(&t->sighand->siglock); + lockdep_assert_held(&t->sighand->siglock); task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); @@ -844,33 +874,35 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) struct task_struct *t; sigset_t flush; - if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) { - if (!(signal->flags & SIGNAL_GROUP_EXIT)) + if (signal->flags & SIGNAL_GROUP_EXIT) { + if (signal->core_state) return sig == SIGKILL; /* - * The process is in the middle of dying, nothing to do. + * The process is in the middle of dying, drop the signal. */ + return false; } else if (sig_kernel_stop(sig)) { /* * This is a stop signal. Remove SIGCONT from all queues. */ siginitset(&flush, sigmask(SIGCONT)); - flush_sigqueue_mask(&flush, &signal->shared_pending); + flush_sigqueue_mask(p, &flush, &signal->shared_pending); for_each_thread(p, t) - flush_sigqueue_mask(&flush, &t->pending); + flush_sigqueue_mask(p, &flush, &t->pending); } else if (sig == SIGCONT) { unsigned int why; /* * Remove all stop signals from all queues, wake all threads. 
*/ siginitset(&flush, SIG_KERNEL_STOP_MASK); - flush_sigqueue_mask(&flush, &signal->shared_pending); + flush_sigqueue_mask(p, &flush, &signal->shared_pending); for_each_thread(p, t) { - flush_sigqueue_mask(&flush, &t->pending); + flush_sigqueue_mask(p, &flush, &t->pending); task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); - if (likely(!(t->ptrace & PT_SEIZED))) + if (likely(!(t->ptrace & PT_SEIZED))) { + t->jobctl &= ~JOBCTL_STOPPED; wake_up_state(t, __TASK_STOPPED); - else + } else ptrace_trap_notify(t); } @@ -925,7 +957,7 @@ static inline bool wants_signal(int sig, struct task_struct *p) if (task_is_stopped_or_traced(p)) return false; - return task_curr(p) || !signal_pending(p); + return task_curr(p) || !task_sigpending(p); } static void complete_signal(int sig, struct task_struct *p, enum pid_type type) @@ -936,8 +968,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type) /* * Now find a thread we can wake up to take the signal off the queue. * - * If the main thread wants the signal, it gets first crack. - * Probably the least surprising to the average bear. + * Try the suggested task first (may or may not be the main thread). */ if (wants_signal(sig, p)) t = p; @@ -970,7 +1001,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type) * then start taking the whole group down immediately. */ if (sig_fatal(p, sig) && - !(signal->flags & SIGNAL_GROUP_EXIT) && + (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) && !sigismember(&t->real_blocked, sig) && (sig == SIGKILL || !p->ptrace)) { /* @@ -986,12 +1017,11 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type) signal->flags = SIGNAL_GROUP_EXIT; signal->group_exit_code = sig; signal->group_stop_count = 0; - t = p; - do { + __for_each_thread(signal, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); - } while_each_thread(p, t); + } return; } } @@ -1009,40 +1039,18 @@ static inline bool legacy_queue(struct sigpending *signals, int sig) return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); } -#ifdef CONFIG_USER_NS -static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t) -{ - if (current_user_ns() == task_cred_xxx(t, user_ns)) - return; - - if (SI_FROMKERNEL(info)) - return; - - rcu_read_lock(); - info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), - make_kuid(current_user_ns(), info->si_uid)); - rcu_read_unlock(); -} -#else -static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t) -{ - return; -} -#endif - -static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, - enum pid_type type, int from_ancestor_ns) +static int __send_signal_locked(int sig, struct kernel_siginfo *info, + struct task_struct *t, enum pid_type type, bool force) { struct sigpending *pending; struct sigqueue *q; int override_rlimit; int ret = 0, result; - assert_spin_locked(&t->sighand->siglock); + lockdep_assert_held(&t->sighand->siglock); result = TRACE_SIGNAL_IGNORED; - if (!prepare_signal(sig, t, - from_ancestor_ns || (info == SEND_SIG_PRIV))) + if (!prepare_signal(sig, t, force)) goto ret; pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; @@ -1057,10 +1065,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc result = TRACE_SIGNAL_DELIVERED; /* - * Skip useless siginfo allocation for SIGKILL SIGSTOP, - * and kernel threads. 
+ * Skip useless siginfo allocation for SIGKILL and kernel threads. */ - if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) + if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) goto out_set; /* @@ -1077,7 +1084,8 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc else override_rlimit = 0; - q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); + q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); + if (q) { list_add_tail(&q->list, &pending->list); switch ((unsigned long) info) { @@ -1088,7 +1096,11 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc q->info.si_code = SI_USER; q->info.si_pid = task_tgid_nr_ns(current, task_active_pid_ns(t)); - q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); + rcu_read_lock(); + q->info.si_uid = + from_kuid_munged(task_cred_xxx(t, user_ns), + current_uid()); + rcu_read_unlock(); break; case (unsigned long) SEND_SIG_PRIV: clear_siginfo(&q->info); @@ -1100,30 +1112,24 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc break; default: copy_siginfo(&q->info, info); - if (from_ancestor_ns) - q->info.si_pid = 0; break; } - - userns_fixup_signal_uid(&q->info, t); - - } else if (!is_si_special(info)) { - if (sig >= SIGRTMIN && info->si_code != SI_USER) { - /* - * Queue overflow, abort. We may abort if the - * signal was rt and sent by user using something - * other than kill(). - */ - result = TRACE_SIGNAL_OVERFLOW_FAIL; - ret = -EAGAIN; - goto ret; - } else { - /* - * This is a silent loss of information. We still - * send the signal, but the *info bits are lost. - */ - result = TRACE_SIGNAL_LOSE_INFO; - } + } else if (!is_si_special(info) && + sig >= SIGRTMIN && info->si_code != SI_USER) { + /* + * Queue overflow, abort. We may abort if the + * signal was rt and sent by user using something + * other than kill(). + */ + result = TRACE_SIGNAL_OVERFLOW_FAIL; + ret = -EAGAIN; + goto ret; + } else { + /* + * This is a silent loss of information. We still + * send the signal, but the *info bits are lost. + */ + result = TRACE_SIGNAL_LOSE_INFO; } out_set: @@ -1150,23 +1156,80 @@ ret: return ret; } -static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, - enum pid_type type) +static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) { - int from_ancestor_ns = 0; + bool ret = false; + switch (siginfo_layout(info->si_signo, info->si_code)) { + case SIL_KILL: + case SIL_CHLD: + case SIL_RT: + ret = true; + break; + case SIL_TIMER: + case SIL_POLL: + case SIL_FAULT: + case SIL_FAULT_TRAPNO: + case SIL_FAULT_MCEERR: + case SIL_FAULT_BNDERR: + case SIL_FAULT_PKUERR: + case SIL_FAULT_PERF_EVENT: + case SIL_SYS: + ret = false; + break; + } + return ret; +} -#ifdef CONFIG_PID_NS - from_ancestor_ns = si_fromuser(info) && - !task_pid_nr_ns(current, task_active_pid_ns(t)); -#endif +int send_signal_locked(int sig, struct kernel_siginfo *info, + struct task_struct *t, enum pid_type type) +{ + /* Should SIGKILL or SIGSTOP be received by a pid namespace init? 
*/ + bool force = false; + + if (info == SEND_SIG_NOINFO) { + /* Force if sent from an ancestor pid namespace */ + force = !task_pid_nr_ns(current, task_active_pid_ns(t)); + } else if (info == SEND_SIG_PRIV) { + /* Don't ignore kernel generated signals */ + force = true; + } else if (has_si_pid_and_uid(info)) { + /* SIGKILL and SIGSTOP is special or has ids */ + struct user_namespace *t_user_ns; + + rcu_read_lock(); + t_user_ns = task_cred_xxx(t, user_ns); + if (current_user_ns() != t_user_ns) { + kuid_t uid = make_kuid(current_user_ns(), info->si_uid); + info->si_uid = from_kuid_munged(t_user_ns, uid); + } + rcu_read_unlock(); + + /* A kernel generated signal? */ + force = (info->si_code == SI_KERNEL); - return __send_signal(sig, info, t, type, from_ancestor_ns); + /* From an ancestor pid namespace? */ + if (!task_pid_nr_ns(current, task_active_pid_ns(t))) { + info->si_pid = 0; + force = true; + } + } + return __send_signal_locked(sig, info, t, type, force); } static void print_fatal_signal(int signr) { - struct pt_regs *regs = signal_pt_regs(); - pr_info("potentially unexpected fatal signal %d.\n", signr); + struct pt_regs *regs = task_pt_regs(current); + struct file *exe_file; + + exe_file = get_task_exe_file(current); + if (exe_file) { + pr_info("%pD: %s: potentially unexpected fatal signal %d.\n", + exe_file, current->comm, signr); + fput(exe_file); + } else { + pr_info("%s: potentially unexpected fatal signal %d.\n", + current->comm, signr); + } #if defined(__i386__) && !defined(__arch_um__) pr_info("code at %08lx: ", regs->ip); @@ -1196,12 +1259,6 @@ static int __init setup_print_fatal_signals(char *str) __setup("print-fatal-signals=", setup_print_fatal_signals); -int -__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) -{ - return send_signal(sig, info, p, PIDTYPE_TGID); -} - int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type) { @@ -1209,13 +1266,19 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p int ret = -ESRCH; if (lock_task_sighand(p, &flags)) { - ret = send_signal(sig, info, p, type); + ret = send_signal_locked(sig, info, p, type); unlock_task_sighand(p, &flags); } return ret; } +enum sig_handler { + HANDLER_CURRENT, /* If reachable use the current handler */ + HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */ + HANDLER_EXIT, /* Only visible as the process exit code */ +}; + /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. @@ -1227,47 +1290,58 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. 
*/ -int -force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t) +static int +force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, + enum sig_handler handler) { unsigned long int flags; int ret, blocked, ignored; struct k_sigaction *action; + int sig = info->si_signo; spin_lock_irqsave(&t->sighand->siglock, flags); action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; blocked = sigismember(&t->blocked, sig); - if (blocked || ignored) { + if (blocked || ignored || (handler != HANDLER_CURRENT)) { action->sa.sa_handler = SIG_DFL; - if (blocked) { + if (handler == HANDLER_EXIT) + action->sa.sa_flags |= SA_IMMUTABLE; + if (blocked) sigdelset(&t->blocked, sig); - recalc_sigpending_and_wake(t); - } } /* * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect - * debugging to leave init killable. + * debugging to leave init killable. But HANDLER_EXIT is always fatal. */ - if (action->sa.sa_handler == SIG_DFL && !t->ptrace) + if (action->sa.sa_handler == SIG_DFL && + (!t->ptrace || (handler == HANDLER_EXIT))) t->signal->flags &= ~SIGNAL_UNKILLABLE; - ret = send_signal(sig, info, t, PIDTYPE_PID); + ret = send_signal_locked(sig, info, t, PIDTYPE_PID); + /* This can happen if the signal was already pending and blocked */ + if (!task_sigpending(t)) + signal_wake_up(t, 0); spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; } +int force_sig_info(struct kernel_siginfo *info) +{ + return force_sig_info_to_task(info, current, HANDLER_CURRENT); +} + /* * Nuke all other threads in the group. */ int zap_other_threads(struct task_struct *p) { - struct task_struct *t = p; + struct task_struct *t; int count = 0; p->signal->group_stop_count = 0; - while_each_thread(p, t) { + for_other_threads(p, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); count++; @@ -1304,7 +1378,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, * must see ->sighand == NULL. */ spin_lock_irqsave(&sighand->siglock, *flags); - if (likely(sighand == tsk->sighand)) + if (likely(sighand == rcu_access_pointer(tsk->sighand))) break; spin_unlock_irqrestore(&sighand->siglock, *flags); } @@ -1313,8 +1387,24 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, return sighand; } +#ifdef CONFIG_LOCKDEP +void lockdep_assert_task_sighand_held(struct task_struct *task) +{ + struct sighand_struct *sighand; + + rcu_read_lock(); + sighand = rcu_dereference(task->sighand); + if (sighand) + lockdep_assert_held(&sighand->siglock); + else + WARN_ON_ONCE(1); + rcu_read_unlock(); +} +#endif + /* - * send signal info to all the members of a group + * send signal info to all the members of a thread group or to the + * individual thread if type == PIDTYPE_PID. */ int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type) @@ -1339,19 +1429,25 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info, int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; - int retval, success; + int ret = -ESRCH; - success = 0; - retval = -ESRCH; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); - success |= !err; - retval = err; + /* + * If group_send_sig_info() succeeds at least once ret + * becomes 0 and after that the code below has no effect. + * Otherwise we return the last err or -ESRCH if this + * process group is empty. 
+ */ + if (ret) + ret = err; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); - return success ? 0 : retval; + + return ret; } -int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) +static int kill_pid_info_type(int sig, struct kernel_siginfo *info, + struct pid *pid, enum pid_type type) { int error = -ESRCH; struct task_struct *p; @@ -1360,11 +1456,10 @@ int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) - error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); + error = group_send_sig_info(sig, info, p, type); rcu_read_unlock(); if (likely(!p || error != -ESRCH)) return error; - /* * The task was unhashed in between, try again. If it * is dead, pid_task() will return NULL, if we race with @@ -1373,6 +1468,11 @@ int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) } } +int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) +{ + return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID); +} + static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) { int error; @@ -1393,34 +1493,65 @@ static inline bool kill_as_cred_perm(const struct cred *cred, uid_eq(cred->uid, pcred->uid); } -/* like kill_pid_info(), but doesn't use uid/euid of "current" */ -int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid, - const struct cred *cred) +/* + * The usb asyncio usage of siginfo is wrong. The glibc support + * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT. + * AKA after the generic fields: + * kernel_pid_t si_pid; + * kernel_uid32_t si_uid; + * sigval_t si_value; + * + * Unfortunately when usb generates SI_ASYNCIO it assumes the layout + * after the generic fields is: + * void __user *si_addr; + * + * This is a practical problem when there is a 64bit big endian kernel + * and a 32bit userspace. As the 32bit address will encoded in the low + * 32bits of the pointer. Those low 32bits will be stored at higher + * address than appear in a 32 bit pointer. So userspace will not + * see the address it was expecting for it's completions. + * + * There is nothing in the encoding that can allow + * copy_siginfo_to_user32 to detect this confusion of formats, so + * handle this by requiring the caller of kill_pid_usb_asyncio to + * notice when this situration takes place and to store the 32bit + * pointer in sival_int, instead of sival_addr of the sigval_t addr + * parameter. 
+ */ +int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, + struct pid *pid, const struct cred *cred) { - int ret = -EINVAL; + struct kernel_siginfo info; struct task_struct *p; unsigned long flags; + int ret = -EINVAL; if (!valid_signal(sig)) return ret; + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = errno; + info.si_code = SI_ASYNCIO; + *((sigval_t *)&info.si_pid) = addr; + rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (!p) { ret = -ESRCH; goto out_unlock; } - if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { + if (!kill_as_cred_perm(cred, p)) { ret = -EPERM; goto out_unlock; } - ret = security_task_kill(p, info, sig, cred); + ret = security_task_kill(p, &info, sig, cred); if (ret) goto out_unlock; if (sig) { if (lock_task_sighand(p, &flags)) { - ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0); + ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false); unlock_task_sighand(p, &flags); } else ret = -ESRCH; @@ -1429,7 +1560,7 @@ out_unlock: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); +EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio); /* * kill_something_info() interprets pid in interesting ways just like kill(2). @@ -1442,12 +1573,8 @@ static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid) { int ret; - if (pid > 0) { - rcu_read_lock(); - ret = kill_pid_info(sig, info, find_vpid(pid)); - rcu_read_unlock(); - return ret; - } + if (pid > 0) + return kill_proc_info(sig, info, pid); /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ if (pid == INT_MIN) @@ -1505,33 +1632,62 @@ send_sig(int sig, struct task_struct *p, int priv) } EXPORT_SYMBOL(send_sig); -void force_sig(int sig, struct task_struct *p) +void force_sig(int sig) { - force_sig_info(sig, SEND_SIG_PRIV, p); + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info(&info); } EXPORT_SYMBOL(force_sig); +void force_fatal_sig(int sig) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info_to_task(&info, current, HANDLER_SIG_DFL); +} + +void force_exit_sig(int sig) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info_to_task(&info, current, HANDLER_EXIT); +} + /* * When things go south during signal handling, we * will force a SIGSEGV. And if the signal that caused * the problem was already a SIGSEGV, we'll want to * make sure we don't even try to deliver the signal.. 
*/ -void force_sigsegv(int sig, struct task_struct *p) +void force_sigsegv(int sig) { - if (sig == SIGSEGV) { - unsigned long flags; - spin_lock_irqsave(&p->sighand->siglock, flags); - p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; - spin_unlock_irqrestore(&p->sighand->siglock, flags); - } - force_sig(SIGSEGV, p); + if (sig == SIGSEGV) + force_fatal_sig(SIGSEGV); + else + force_sig(SIGSEGV); } -int force_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t) +int force_sig_fault_to_task(int sig, int code, void __user *addr, + struct task_struct *t) { struct kernel_siginfo info; @@ -1540,21 +1696,15 @@ int force_sig_fault(int sig, int code, void __user *addr info.si_errno = 0; info.si_code = code; info.si_addr = addr; -#ifdef __ARCH_SI_TRAPNO - info.si_trapno = trapno; -#endif -#ifdef __ia64__ - info.si_imm = imm; - info.si_flags = flags; - info.si_isr = isr; -#endif - return force_sig_info(info.si_signo, &info, t); + return force_sig_info_to_task(&info, t, HANDLER_CURRENT); } -int send_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t) +int force_sig_fault(int sig, int code, void __user *addr) +{ + return force_sig_fault_to_task(sig, code, addr, current); +} + +int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t) { struct kernel_siginfo info; @@ -1563,18 +1713,10 @@ int send_sig_fault(int sig, int code, void __user *addr info.si_errno = 0; info.si_code = code; info.si_addr = addr; -#ifdef __ARCH_SI_TRAPNO - info.si_trapno = trapno; -#endif -#ifdef __ia64__ - info.si_imm = imm; - info.si_flags = flags; - info.si_isr = isr; -#endif return send_sig_info(info.si_signo, &info, t); } -int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) +int force_sig_mceerr(int code, void __user *addr, short lsb) { struct kernel_siginfo info; @@ -1585,7 +1727,7 @@ int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct info.si_code = code; info.si_addr = addr; info.si_addr_lsb = lsb; - return force_sig_info(info.si_signo, &info, t); + return force_sig_info(&info); } int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) @@ -1614,7 +1756,7 @@ int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) info.si_addr = addr; info.si_lower = lower; info.si_upper = upper; - return force_sig_info(info.si_signo, &info, current); + return force_sig_info(&info); } #ifdef SEGV_PKUERR @@ -1628,10 +1770,59 @@ int force_sig_pkuerr(void __user *addr, u32 pkey) info.si_code = SEGV_PKUERR; info.si_addr = addr; info.si_pkey = pkey; - return force_sig_info(info.si_signo, &info, current); + return force_sig_info(&info); } #endif +int send_sig_perf(void __user *addr, u32 type, u64 sig_data) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_PERF; + info.si_addr = addr; + info.si_perf_data = sig_data; + info.si_perf_type = type; + + /* + * Signals generated by perf events should not terminate the whole + * process if SIGTRAP is blocked, however, delivering the signal + * asynchronously is better than not delivering at all. But tell user + * space if the signal was asynchronous, so it can clearly be + * distinguished from normal synchronous ones. 
+ */ + info.si_perf_flags = sigismember(¤t->blocked, info.si_signo) ? + TRAP_PERF_FLAG_ASYNC : + 0; + + return send_sig_info(info.si_signo, &info, current); +} + +/** + * force_sig_seccomp - signals the task to allow in-process syscall emulation + * @syscall: syscall number to send to userland + * @reason: filter-supplied reason code to send to userland (via si_errno) + * @force_coredump: true to trigger a coredump + * + * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info. + */ +int force_sig_seccomp(int syscall, int reason, bool force_coredump) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = SIGSYS; + info.si_code = SYS_SECCOMP; + info.si_call_addr = (void __user *)KSTK_EIP(current); + info.si_errno = reason; + info.si_arch = syscall_get_arch(current); + info.si_syscall = syscall; + return force_sig_info_to_task(&info, current, + force_coredump ? HANDLER_EXIT : HANDLER_CURRENT); +} + /* For the crazy architectures that include trap information in * the errno field, instead of an actual errno value. */ @@ -1644,19 +1835,55 @@ int force_sig_ptrace_errno_trap(int errno, void __user *addr) info.si_errno = errno; info.si_code = TRAP_HWBKPT; info.si_addr = addr; - return force_sig_info(info.si_signo, &info, current); + return force_sig_info(&info); } -int kill_pgrp(struct pid *pid, int sig, int priv) +/* For the rare architectures that include trap information using + * si_trapno. + */ +int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno) { - int ret; + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = code; + info.si_addr = addr; + info.si_trapno = trapno; + return force_sig_info(&info); +} + +/* For the rare architectures that include trap information using + * si_trapno. + */ +int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, + struct task_struct *t) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = code; + info.si_addr = addr; + info.si_trapno = trapno; + return send_sig_info(info.si_signo, &info, t); +} +static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) +{ + int ret; read_lock(&tasklist_lock); - ret = __kill_pgrp_info(sig, __si_special(priv), pid); + ret = __kill_pgrp_info(sig, info, pgrp); read_unlock(&tasklist_lock); - return ret; } + +int kill_pgrp(struct pid *pid, int sig, int priv) +{ + return kill_pgrp_info(sig, __si_special(priv), pid); +} EXPORT_SYMBOL(kill_pgrp); int kill_pid(struct pid *pid, int sig, int priv) @@ -1665,96 +1892,270 @@ int kill_pid(struct pid *pid, int sig, int priv) } EXPORT_SYMBOL(kill_pid); +#ifdef CONFIG_POSIX_TIMERS /* - * These functions support sending signals using preallocated sigqueue - * structures. This is needed "because realtime applications cannot - * afford to lose notifications of asynchronous events, like timer - * expirations or I/O completions". In the case of POSIX Timers - * we allocate the sigqueue structure from the timer_create. If this - * allocation fails we are able to report the failure to the application - * with an EAGAIN error. + * These functions handle POSIX timer signals. POSIX timers use + * preallocated sigqueue structs for sending signals. 
*/ -struct sigqueue *sigqueue_alloc(void) +static void __flush_itimer_signals(struct sigpending *pending) { - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); + sigset_t signal, retain; + struct sigqueue *q, *n; + + signal = pending->signal; + sigemptyset(&retain); - if (q) - q->flags |= SIGQUEUE_PREALLOC; + list_for_each_entry_safe(q, n, &pending->list, list) { + int sig = q->info.si_signo; - return q; + if (likely(q->info.si_code != SI_TIMER)) { + sigaddset(&retain, sig); + } else { + sigdelset(&signal, sig); + list_del_init(&q->list); + __sigqueue_free(q); + } + } + + sigorsets(&pending->signal, &signal, &retain); } -void sigqueue_free(struct sigqueue *q) +void flush_itimer_signals(void) { - unsigned long flags; - spinlock_t *lock = ¤t->sighand->siglock; + struct task_struct *tsk = current; - BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); - /* - * We must hold ->siglock while testing q->list - * to serialize with collect_signal() or with - * __exit_signal()->flush_sigqueue(). - */ - spin_lock_irqsave(lock, flags); - q->flags &= ~SIGQUEUE_PREALLOC; - /* - * If it is queued it will be freed when dequeued, - * like the "regular" sigqueue. - */ - if (!list_empty(&q->list)) - q = NULL; - spin_unlock_irqrestore(lock, flags); + guard(spinlock_irqsave)(&tsk->sighand->siglock); + __flush_itimer_signals(&tsk->pending); + __flush_itimer_signals(&tsk->signal->shared_pending); +} - if (q) - __sigqueue_free(q); +bool posixtimer_init_sigqueue(struct sigqueue *q) +{ + struct ucounts *ucounts = sig_get_ucounts(current, -1, 0); + + if (!ucounts) + return false; + clear_siginfo(&q->info); + __sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC); + return true; } -int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type) +static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type) { - int sig = q->info.si_signo; struct sigpending *pending; + int sig = q->info.si_signo; + + signalfd_notify(t, sig); + pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; + list_add_tail(&q->list, &pending->list); + sigaddset(&pending->signal, sig); + complete_signal(sig, t, type); +} + +/* + * This function is used by POSIX timers to deliver a timer signal. + * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID + * set), the signal must be delivered to the specific thread (queues + * into t->pending). + * + * Where type is not PIDTYPE_PID, signals must be delivered to the + * process. In this case, prefer to deliver to current if it is in + * the same thread group as the target process and its sighand is + * stable, which avoids unnecessarily waking up a potentially idle task. 
+ */ +static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr) +{ + struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type); + + if (t && tmr->it_pid_type != PIDTYPE_PID && + same_thread_group(t, current) && !current->exit_state) + t = current; + return t; +} + +void posixtimer_send_sigqueue(struct k_itimer *tmr) +{ + struct sigqueue *q = &tmr->sigq; + int sig = q->info.si_signo; struct task_struct *t; unsigned long flags; - int ret, result; + int result; - BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); + guard(rcu)(); - ret = -1; - rcu_read_lock(); - t = pid_task(pid, type); - if (!t || !likely(lock_task_sighand(t, &flags))) - goto ret; + t = posixtimer_get_target(tmr); + if (!t) + return; - ret = 1; /* the signal is ignored */ - result = TRACE_SIGNAL_IGNORED; - if (!prepare_signal(sig, t, false)) + if (!likely(lock_task_sighand(t, &flags))) + return; + + /* + * Update @tmr::sigqueue_seq for posix timer signals with sighand + * locked to prevent a race against dequeue_signal(). + */ + tmr->it_sigqueue_seq = tmr->it_signal_seq; + + /* + * Set the signal delivery status under sighand lock, so that the + * ignored signal handling can distinguish between a periodic and a + * non-periodic timer. + */ + tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING; + + if (!prepare_signal(sig, t, false)) { + result = TRACE_SIGNAL_IGNORED; + + if (!list_empty(&q->list)) { + /* + * The signal was ignored and blocked. The timer + * expiry queued it because blocked signals are + * queued independent of the ignored state. + * + * The unblocking set SIGPENDING, but the signal + * was not yet dequeued from the pending list. + * So prepare_signal() sees unblocked and ignored, + * which ends up here. Leave it queued like a + * regular signal. + * + * The same happens when the task group is exiting + * and the signal is already queued. + * prepare_signal() treats SIGNAL_GROUP_EXIT as + * ignored independent of its queued state. This + * gets cleaned up in __exit_signal(). + */ + goto out; + } + + /* Periodic timers with SIG_IGN are queued on the ignored list */ + if (tmr->it_sig_periodic) { + /* + * Already queued means the timer was rearmed after + * the previous expiry got it on the ignore list. + * Nothing to do for that case. + */ + if (hlist_unhashed(&tmr->ignored_list)) { + /* + * Take a signal reference and queue it on + * the ignored list. + */ + posixtimer_sigqueue_getref(q); + posixtimer_sig_ignore(t, q); + } + } else if (!hlist_unhashed(&tmr->ignored_list)) { + /* + * Covers the case where a timer was periodic and + * then the signal was ignored. Later it was rearmed + * as oneshot timer. The previous signal is invalid + * now, and this oneshot signal has to be dropped. + * Remove it from the ignored list and drop the + * reference count as the signal is not longer + * queued. + */ + hlist_del_init(&tmr->ignored_list); + posixtimer_putref(tmr); + } goto out; + } - ret = 0; if (unlikely(!list_empty(&q->list))) { - /* - * If an SI_TIMER entry is already queue just increment - * the overrun count. - */ - BUG_ON(q->info.si_code != SI_TIMER); - q->info.si_overrun++; + /* This holds a reference count already */ result = TRACE_SIGNAL_ALREADY_PENDING; goto out; } - q->info.si_overrun = 0; - signalfd_notify(t, sig); - pending = (type != PIDTYPE_PID) ? 
&t->signal->shared_pending : &t->pending; - list_add_tail(&q->list, &pending->list); - sigaddset(&pending->signal, sig); - complete_signal(sig, t, type); + /* + * If the signal is on the ignore list, it got blocked after it was + * ignored earlier. But nothing lifted the ignore. Move it back to + * the pending list to be consistent with the regular signal + * handling. This already holds a reference count. + * + * If it's not on the ignore list acquire a reference count. + */ + if (likely(hlist_unhashed(&tmr->ignored_list))) + posixtimer_sigqueue_getref(q); + else + hlist_del_init(&tmr->ignored_list); + + posixtimer_queue_sigqueue(q, t, tmr->it_pid_type); result = TRACE_SIGNAL_DELIVERED; out: - trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result); + trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result); unlock_task_sighand(t, &flags); -ret: - rcu_read_unlock(); - return ret; +} + +static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) +{ + struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); + + /* + * If the timer is marked deleted already or the signal originates + * from a non-periodic timer, then just drop the reference + * count. Otherwise queue it on the ignored list. + */ + if (posixtimer_valid(tmr) && tmr->it_sig_periodic) + hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers); + else + posixtimer_putref(tmr); +} + +static void posixtimer_sig_unignore(struct task_struct *tsk, int sig) +{ + struct hlist_head *head = &tsk->signal->ignored_posix_timers; + struct hlist_node *tmp; + struct k_itimer *tmr; + + if (likely(hlist_empty(head))) + return; + + /* + * Rearming a timer with sighand lock held is not possible due to + * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and + * let the signal delivery path deal with it whether it needs to be + * rearmed or not. This cannot be decided here w/o dropping sighand + * lock and creating a loop retry horror show. + */ + hlist_for_each_entry_safe(tmr, tmp , head, ignored_list) { + struct task_struct *target; + + /* + * tmr::sigq.info.si_signo is immutable, so accessing it + * without holding tmr::it_lock is safe. + */ + if (tmr->sigq.info.si_signo != sig) + continue; + + hlist_del_init(&tmr->ignored_list); + + /* This should never happen and leaks a reference count */ + if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list))) + continue; + + /* + * Get the target for the signal. If target is a thread and + * has exited by now, drop the reference count. + */ + guard(rcu)(); + target = posixtimer_get_target(tmr); + if (target) + posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type); + else + posixtimer_putref(tmr); + } +} +#else /* CONFIG_POSIX_TIMERS */ +static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { } +static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { } +#endif /* !CONFIG_POSIX_TIMERS */ + +void do_notify_pidfd(struct task_struct *task) +{ + struct pid *pid = task_pid(task); + + WARN_ON(task->exit_state == 0); + + __wake_up(&pid->wait_pidfd, TASK_NORMAL, 0, + poll_to_key(EPOLLIN | EPOLLRDNORM)); } /* @@ -1772,20 +2173,23 @@ bool do_notify_parent(struct task_struct *tsk, int sig) bool autoreap = false; u64 utime, stime; - BUG_ON(sig == -1); + WARN_ON_ONCE(sig == -1); - /* do_notify_parent_cldstop should have been called instead. */ - BUG_ON(task_is_stopped_or_traced(tsk)); + /* do_notify_parent_cldstop should have been called instead. 
*/ + WARN_ON_ONCE(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + WARN_ON_ONCE(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); + /* ptraced, or group-leader without sub-threads */ + do_notify_pidfd(tsk); + if (sig != SIGCHLD) { /* * This is only possible if parent == real_parent. * Check if it has changed security domain. */ - if (tsk->parent_exec_id != tsk->parent->self_exec_id) + if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) sig = SIGCHLD; } @@ -1847,8 +2251,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) sig = 0; } + /* + * Send with __send_signal as si_pid and si_uid are in the + * parent's namespaces. + */ if (valid_signal(sig) && sig) - __group_send_sig_info(sig, &info, tsk->parent); + __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); @@ -1918,7 +2326,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, spin_lock_irqsave(&sighand->siglock, flags); if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) - __group_send_sig_info(SIGCHLD, &info, parent); + send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID); /* * Even if SIGCHLD is not generated, we must wake up wait4 calls. */ @@ -1926,40 +2334,6 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, spin_unlock_irqrestore(&sighand->siglock, flags); } -static inline bool may_ptrace_stop(void) -{ - if (!likely(current->ptrace)) - return false; - /* - * Are we in the middle of do_coredump? - * If so and our tracer is also part of the coredump stopping - * is a deadlock situation, and pointless because our tracer - * is dead so don't allow us to stop. - * If SIGKILL was already sent before the caller unlocked - * ->siglock we must see ->core_state != NULL. Otherwise it - * is safe to enter schedule(). - * - * This is almost outdated, a task with the pending SIGKILL can't - * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported - * after SIGKILL was already dequeued. - */ - if (unlikely(current->mm->core_state) && - unlikely(current->mm == current->parent->mm)) - return false; - - return true; -} - -/* - * Return non-zero if there is a SIGKILL that should be waking us up. - * Called with the siglock held. - */ -static bool sigkill_pending(struct task_struct *tsk) -{ - return sigismember(&tsk->pending.signal, SIGKILL) || - sigismember(&tsk->signal->shared_pending.signal, SIGKILL); -} - /* * This must be called with current->sighand->siglock held. * @@ -1968,16 +2342,18 @@ static bool sigkill_pending(struct task_struct *tsk) * That makes it a way to test a stopped process for * being ptrace-stopped vs being job-control-stopped. * - * If we actually decide not to stop at all because the tracer - * is gone, we keep current->exit_code unless clear_code. + * Returns the signal the ptracer requested the code resume + * with. If the code did not stop because the tracer is gone, + * the stop signal remains unchanged unless clear_code. 
*/ -static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) +static int ptrace_stop(int exit_code, int why, unsigned long message, + kernel_siginfo_t *info) __releases(¤t->sighand->siglock) __acquires(¤t->sighand->siglock) { bool gstop_done = false; - if (arch_ptrace_stop_needed(exit_code, info)) { + if (arch_ptrace_stop_needed()) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults @@ -1985,18 +2361,23 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. - * Meanwhile, a SIGKILL could come in before we retake the - * siglock. That must prevent us from sleeping in TASK_TRACED. - * So after regaining the lock, we must check for SIGKILL. */ spin_unlock_irq(¤t->sighand->siglock); - arch_ptrace_stop(exit_code, info); + arch_ptrace_stop(); spin_lock_irq(¤t->sighand->siglock); - if (sigkill_pending(current)) - return; } + /* + * After this point ptrace_signal_wake_up or signal_wake_up + * will clear TASK_TRACED if ptrace_unlink happens or a fatal + * signal comes in. Handle previous ptrace_unlinks and fatal + * signals here to prevent ptrace_stop sleeping in schedule. + */ + if (!current->ptrace || __fatal_signal_pending(current)) + return exit_code; + set_special_state(TASK_TRACED); + current->jobctl |= JOBCTL_TRACED; /* * We're committing to trapping. TRACED should be visible before @@ -2018,6 +2399,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t */ smp_wmb(); + current->ptrace_message = message; current->last_siginfo = info; current->exit_code = exit_code; @@ -2041,51 +2423,56 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); - if (may_ptrace_stop()) { - /* - * Notify parents of the stop. - * - * While ptraced, there are two parents - the ptracer and - * the real_parent of the group_leader. The ptracer should - * know about every stop while the real parent is only - * interested in the completion of group stop. The states - * for the two don't interact with each other. Notify - * separately unless they're gonna be duplicates. - */ + /* + * Notify parents of the stop. + * + * While ptraced, there are two parents - the ptracer and + * the real_parent of the group_leader. The ptracer should + * know about every stop while the real parent is only + * interested in the completion of group stop. The states + * for the two don't interact with each other. Notify + * separately unless they're gonna be duplicates. + */ + if (current->ptrace) do_notify_parent_cldstop(current, true, why); - if (gstop_done && ptrace_reparented(current)) - do_notify_parent_cldstop(current, false, why); + if (gstop_done && (!current->ptrace || ptrace_reparented(current))) + do_notify_parent_cldstop(current, false, why); - /* - * Don't want to allow preemption here, because - * sys_ptrace() needs this task to be inactive. - * - * XXX: implement read_unlock_no_resched(). - */ + /* + * The previous do_notify_parent_cldstop() invocation woke ptracer. + * One a PREEMPTION kernel this can result in preemption requirement + * which will be fulfilled after read_unlock() and the ptracer will be + * put on the CPU. 
+ * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for + * this task wait in schedule(). If this task gets preempted then it + * remains enqueued on the runqueue. The ptracer will observe this and + * then sleep for a delay of one HZ tick. In the meantime this task + * gets scheduled, enters schedule() and will wait for the ptracer. + * + * This preemption point is not bad from a correctness point of + * view but extends the runtime by one HZ tick time due to the + * ptracer's sleep. The preempt-disable section ensures that there + * will be no preemption between unlock and schedule() and so + * improving the performance since the ptracer will observe that + * the tracee is scheduled out once it gets on the CPU. + * + * On PREEMPT_RT locking tasklist_lock does not disable preemption. + * Therefore the task can be preempted after do_notify_parent_cldstop() + * before unlocking tasklist_lock so there is no benefit in doing this. + * + * In fact disabling preemption is harmful on PREEMPT_RT because + * the spinlock_t in cgroup_enter_frozen() must not be acquired + * with preemption disabled due to the 'sleeping' spinlock + * substitution of RT. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) preempt_disable(); - read_unlock(&tasklist_lock); + read_unlock(&tasklist_lock); + cgroup_enter_frozen(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) preempt_enable_no_resched(); - freezable_schedule(); - } else { - /* - * By the time we got the lock, our tracer went away. - * Don't drop the lock yet, another tracer may come. - * - * If @gstop_done, the ptracer went away between group stop - * completion and here. During detach, it would have set - * JOBCTL_STOP_PENDING on us and we'll re-enter - * TASK_STOPPED in do_signal_stop() on return, so notifying - * the real parent of the group stop completion is enough. - */ - if (gstop_done) - do_notify_parent_cldstop(current, false, why); - - /* tasklist protects us from ptrace_freeze_traced() */ - __set_current_state(TASK_RUNNING); - if (clear_code) - current->exit_code = 0; - read_unlock(&tasklist_lock); - } + schedule(); + cgroup_leave_frozen(true); /* * We are back. Now reacquire the siglock before touching @@ -2093,10 +2480,13 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t * any signal-sending on another CPU that wants to examine it. */ spin_lock_irq(&current->sighand->siglock); + exit_code = current->exit_code; current->last_siginfo = NULL; + current->ptrace_message = 0; + current->exit_code = 0; /* LISTENING can be set only during STOP traps, clear it */ - current->jobctl &= ~JOBCTL_LISTENING; + current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN); /* * Queued signals ignored us while we were stopped for tracing. @@ -2104,9 +2494,10 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t * This sets TIF_SIGPENDING, but never clears it. */ recalc_sigpending_tsk(current); + return exit_code; } -static void ptrace_do_notify(int signr, int exit_code, int why) +static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message) { kernel_siginfo_t info; @@ -2117,18 +2508,21 @@ static void ptrace_do_notify(int signr, int exit_code, int why) info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); /* Let the debugger run. 
*/ - ptrace_stop(exit_code, why, 1, &info); + return ptrace_stop(exit_code, why, message, &info); } -void ptrace_notify(int exit_code) +int ptrace_notify(int exit_code, unsigned long message) { + int signr; + BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); - if (unlikely(current->task_works)) + if (unlikely(task_work_pending(current))) task_work_run(); spin_lock_irq(&current->sighand->siglock); - ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); + signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message); spin_unlock_irq(&current->sighand->siglock); + return signr; } /** @@ -2166,7 +2560,8 @@ static bool do_signal_stop(int signr) WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || - unlikely(signal_group_exit(sig))) + unlikely(sig->flags & SIGNAL_GROUP_EXIT) || + unlikely(sig->group_exec_task)) return false; /* * There is no group stop already in progress. We must @@ -2191,12 +2586,10 @@ static bool do_signal_stop(int signr) sig->group_exit_code = signr; sig->group_stop_count = 0; - if (task_set_jobctl_pending(current, signr | gstop)) sig->group_stop_count++; - t = current; - while_each_thread(current, t) { + for_other_threads(current, t) { /* * Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, @@ -2224,6 +2617,7 @@ static bool do_signal_stop(int signr) if (task_participate_group_stop(current)) notify = CLD_STOPPED; + current->jobctl |= JOBCTL_STOPPED; set_special_state(TASK_STOPPED); spin_unlock_irq(&current->sighand->siglock); @@ -2243,7 +2637,8 @@ static bool do_signal_stop(int signr) } /* Now we don't run again until woken by SIGCONT or SIGKILL */ - freezable_schedule(); + cgroup_enter_frozen(); + schedule(); return true; } else { /* @@ -2281,15 +2676,59 @@ static void do_jobctl_trap(void) signr = SIGTRAP; WARN_ON_ONCE(!signr); ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), - CLD_STOPPED); + CLD_STOPPED, 0); } else { WARN_ON_ONCE(!signr); ptrace_stop(signr, CLD_STOPPED, 0, NULL); - current->exit_code = 0; } } -static int ptrace_signal(int signr, kernel_siginfo_t *info) +/** + * do_freezer_trap - handle the freezer jobctl trap + * + * Puts the task into frozen state, if only the task is not about to quit. + * In this case it drops JOBCTL_TRAP_FREEZE. + * + * CONTEXT: + * Must be called with @current->sighand->siglock held, + * which is always released before returning. + */ +static void do_freezer_trap(void) + __releases(&current->sighand->siglock) +{ + /* + * If there are other trap bits pending except JOBCTL_TRAP_FREEZE, + * let's make another loop to give it a chance to be handled. + * In any case, we'll return back. + */ + if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != + JOBCTL_TRAP_FREEZE) { + spin_unlock_irq(&current->sighand->siglock); + return; + } + + /* + * Now we're sure that there is no pending fatal signal and no + * pending traps. Clear TIF_SIGPENDING to not get out of schedule() + * immediately (if there is a non-fatal signal pending), and + * put the task into sleep. + */ + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); + clear_thread_flag(TIF_SIGPENDING); + spin_unlock_irq(&current->sighand->siglock); + cgroup_enter_frozen(); + schedule(); + + /* + * We could've been woken by task_work, run it to clear + * TIF_NOTIFY_SIGNAL. The caller will retry if necessary. 
+ */ + clear_notify_signal(); + if (unlikely(task_work_pending(current))) + task_work_run(); +} + +static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) { /* * We do not check sig_kernel_stop(signr) but set this marker @@ -2301,15 +2740,12 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info) * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; - ptrace_stop(signr, CLD_TRAPPED, 0, info); + signr = ptrace_stop(signr, CLD_TRAPPED, 0, info); /* We're back. Did the debugger cancel the sig? */ - signr = current->exit_code; if (signr == 0) return signr; - current->exit_code = 0; - /* * Update the siginfo structure if the signal has * changed. If the debugger wanted something @@ -2329,23 +2765,50 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info) } /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(&current->blocked, signr)) { - send_signal(signr, info, current, PIDTYPE_PID); + if (sigismember(&current->blocked, signr) || + fatal_signal_pending(current)) { + send_signal_locked(signr, info, current, type); signr = 0; } return signr; } +static void hide_si_addr_tag_bits(struct ksignal *ksig) +{ + switch (siginfo_layout(ksig->sig, ksig->info.si_code)) { + case SIL_FAULT: + case SIL_FAULT_TRAPNO: + case SIL_FAULT_MCEERR: + case SIL_FAULT_BNDERR: + case SIL_FAULT_PKUERR: + case SIL_FAULT_PERF_EVENT: + ksig->info.si_addr = arch_untagged_si_addr( + ksig->info.si_addr, ksig->sig, ksig->info.si_code); + break; + case SIL_KILL: + case SIL_TIMER: + case SIL_POLL: + case SIL_CHLD: + case SIL_RT: + case SIL_SYS: + break; + } +} + bool get_signal(struct ksignal *ksig) { struct sighand_struct *sighand = current->sighand; struct signal_struct *signal = current->signal; int signr; - if (unlikely(current->task_works)) + clear_notify_signal(); + if (unlikely(task_work_pending(current))) task_work_run(); + if (!task_sigpending(current)) + return false; + if (unlikely(uprobe_deny_signal())) return false; @@ -2358,6 +2821,7 @@ bool get_signal(struct ksignal *ksig) relock: spin_lock_irq(&sighand->siglock); + /* * Every stopped thread goes here after wakeup. Check to see if * we should notify the parent, prepare_signal(SIGCONT) encodes @@ -2396,24 +2860,65 @@ relock: for (;;) { struct k_sigaction *ka; + enum pid_type type; + + /* Has this task already been marked for death? */ + if ((signal->flags & SIGNAL_GROUP_EXIT) || + signal->group_exec_task) { + signr = SIGKILL; + sigdelset(&current->pending.signal, SIGKILL); + trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, + &sighand->action[SIGKILL-1]); + recalc_sigpending(); + /* + * implies do_group_exit() or return to PF_USER_WORKER, + * no need to initialize ksig->info/etc. + */ + goto fatal; + } if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && do_signal_stop(0)) goto relock; - if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { - do_jobctl_trap(); + if (unlikely(current->jobctl & + (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { + if (current->jobctl & JOBCTL_TRAP_MASK) { + do_jobctl_trap(); + spin_unlock_irq(&sighand->siglock); + } else if (current->jobctl & JOBCTL_TRAP_FREEZE) + do_freezer_trap(); + + goto relock; + } + + /* + * If the task is leaving the frozen state, let's update + * cgroup counters and reset the frozen bit. 
+ */ + if (unlikely(cgroup_task_frozen(current))) { spin_unlock_irq(&sighand->siglock); + cgroup_leave_frozen(false); goto relock; } - signr = dequeue_signal(current, &current->blocked, &ksig->info); + /* + * Signals generated by the execution of an instruction + * need to be delivered before any other pending signals + * so that the instruction pointer in the signal stack + * frame points to the faulting instruction. + */ + type = PIDTYPE_PID; + signr = dequeue_synchronous_signal(&ksig->info); + if (!signr) + signr = dequeue_signal(&current->blocked, &ksig->info, &type); if (!signr) break; /* will return 0 */ - if (unlikely(current->ptrace) && signr != SIGKILL) { - signr = ptrace_signal(signr, &ksig->info); + if (unlikely(current->ptrace) && (signr != SIGKILL) && + !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { + signr = ptrace_signal(signr, &ksig->info, type); if (!signr) continue; } @@ -2477,7 +2982,7 @@ relock: spin_lock_irq(&sighand->siglock); } - if (likely(do_signal_stop(ksig->info.si_signo))) { + if (likely(do_signal_stop(signr))) { /* It released the siglock. */ goto relock; } @@ -2489,7 +2994,10 @@ relock: continue; } + fatal: spin_unlock_irq(&sighand->siglock); + if (unlikely(cgroup_task_frozen(current))) + cgroup_leave_frozen(true); /* * Anything else is fatal, maybe with a core dump. @@ -2498,7 +3006,7 @@ relock: if (sig_kernel_coredump(signr)) { if (print_fatal_signals) - print_fatal_signal(ksig->info.si_signo); + print_fatal_signal(signr); proc_coredump_connector(current); /* * If it was able to dump core, this kills all @@ -2508,29 +3016,42 @@ relock: * first and our do_group_exit call below will use * that value and ignore the one we pass it. */ - do_coredump(&ksig->info); + vfs_coredump(&ksig->info); } /* + * PF_USER_WORKER threads will catch and exit on fatal signals + * themselves. They have cleanup that must be performed, so we + * cannot call do_exit() on their behalf. Note that ksig won't + * be properly initialized, PF_USER_WORKER's shouldn't use it. + */ + if (current->flags & PF_USER_WORKER) + goto out; + + /* * Death signals, no core dump. */ - do_group_exit(ksig->info.si_signo); + do_group_exit(signr); /* NOTREACHED */ } spin_unlock_irq(&sighand->siglock); ksig->sig = signr; - return ksig->sig > 0; + + if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS)) + hide_si_addr_tag_bits(ksig); +out: + return signr > 0; } /** - * signal_delivered - + * signal_delivered - called after signal delivery to update blocked signals * @ksig: kernel signal struct * @stepping: nonzero if debugger single-step or block-step in use * * This function should be called when a signal has successfully been * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask - * is always blocked, and the signal itself is blocked unless %SA_NODEFER + * is always blocked), and the signal itself is blocked unless %SA_NODEFER * is set in @ksig->ka.sa.sa_flags. Tracing is notified. 
*/ static void signal_delivered(struct ksignal *ksig, int stepping) @@ -2547,13 +3068,16 @@ static void signal_delivered(struct ksignal *ksig, int stepping) if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, ksig->sig); set_current_blocked(&blocked); - tracehook_signal_handler(stepping); + if (current->sas_ss_flags & SS_AUTODISARM) + sas_ss_reset(current); + if (stepping) + ptrace_notify(SIGTRAP, 0); } void signal_setup_done(int failed, struct ksignal *ksig, int stepping) { if (failed) - force_sigsegv(ksig->sig, current); + force_sigsegv(ksig->sig); else signal_delivered(ksig, stepping); } @@ -2572,8 +3096,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) if (sigisemptyset(&retarget)) return; - t = tsk; - while_each_thread(tsk, t) { + for_other_threads(tsk, t) { if (t->flags & PF_EXITING) continue; @@ -2582,7 +3105,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) /* Remove the signals this thread can handle. */ sigandsets(&retarget, &retarget, &t->blocked); - if (!signal_pending(t)) + if (!task_sigpending(t)) signal_wake_up(t, 0); if (sigisemptyset(&retarget)) @@ -2601,7 +3124,7 @@ void exit_signals(struct task_struct *tsk) */ cgroup_threadgroup_change_begin(tsk); - if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { + if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) { tsk->flags |= PF_EXITING; cgroup_threadgroup_change_end(tsk); return; @@ -2616,7 +3139,7 @@ void exit_signals(struct task_struct *tsk) cgroup_threadgroup_change_end(tsk); - if (!signal_pending(tsk)) + if (!task_sigpending(tsk)) goto out; unblocked = tsk->blocked; @@ -2660,7 +3183,7 @@ long do_no_restart_syscall(struct restart_block *param) static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) { - if (signal_pending(tsk) && !thread_group_empty(tsk)) { + if (task_sigpending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. */ sigandnsets(&newblocked, newset, &current->blocked); @@ -2740,79 +3263,49 @@ EXPORT_SYMBOL(sigprocmask); * * This is useful for syscalls such as ppoll, pselect, io_pgetevents and * epoll_pwait where a new sigmask is passed from userland for the syscalls. + * + * Note that it does set_restore_sigmask() in advance, so it must be always + * paired with restore_saved_sigmask_unless() before return from syscall. 
*/ -int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, - sigset_t *oldset, size_t sigsetsize) +int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize) { - if (!usigmask) - return 0; + sigset_t kmask; + if (!umask) + return 0; if (sigsetsize != sizeof(sigset_t)) return -EINVAL; - if (copy_from_user(set, usigmask, sizeof(sigset_t))) + if (copy_from_user(&kmask, umask, sizeof(sigset_t))) return -EFAULT; - *oldset = current->blocked; - set_current_blocked(set); + set_restore_sigmask(); + current->saved_sigmask = current->blocked; + set_current_blocked(&kmask); return 0; } -EXPORT_SYMBOL(set_user_sigmask); #ifdef CONFIG_COMPAT -int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, - sigset_t *set, sigset_t *oldset, +int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize) { - if (!usigmask) - return 0; + sigset_t kmask; + if (!umask) + return 0; if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; - if (get_compat_sigset(set, usigmask)) + if (get_compat_sigset(&kmask, umask)) return -EFAULT; - *oldset = current->blocked; - set_current_blocked(set); + set_restore_sigmask(); + current->saved_sigmask = current->blocked; + set_current_blocked(&kmask); return 0; } -EXPORT_SYMBOL(set_compat_user_sigmask); #endif -/* - * restore_user_sigmask: - * usigmask: sigmask passed in from userland. - * sigsaved: saved sigmask when the syscall started and changed the sigmask to - * usigmask. - * - * This is useful for syscalls such as ppoll, pselect, io_pgetevents and - * epoll_pwait where a new sigmask is passed in from userland for the syscalls. - */ -void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved) -{ - - if (!usigmask) - return; - /* - * When signals are pending, do not restore them here. - * Restoring sigmask here can lead to delivering signals that the above - * syscalls are intended to block because of the sigmask passed in. - */ - if (signal_pending(current)) { - current->saved_sigmask = *sigsaved; - set_restore_sigmask(); - return; - } - - /* - * This is needed because the fast syscall return path does not restore - * saved_sigmask when signals are not pending. 
- */ - set_current_blocked(sigsaved); -} -EXPORT_SYMBOL(restore_user_sigmask); - /** * sys_rt_sigprocmask - change the list of currently blocked signals * @how: whether to add, remove, or set signals @@ -2974,6 +3467,15 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code) else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) layout = SIL_FAULT_PKUERR; #endif + else if ((sig == SIGTRAP) && (si_code == TRAP_PERF)) + layout = SIL_FAULT_PERF_EVENT; + else if (IS_ENABLED(CONFIG_SPARC) && + (sig == SIGILL) && (si_code == ILL_ILLTRP)) + layout = SIL_FAULT_TRAPNO; + else if (IS_ENABLED(CONFIG_ALPHA) && + ((sig == SIGFPE) || + ((sig == SIGTRAP) && (si_code == TRAP_UNK)))) + layout = SIL_FAULT_TRAPNO; } else if (si_code <= NSIGPOLL) layout = SIL_POLL; @@ -3043,94 +3545,92 @@ int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) } #ifdef CONFIG_COMPAT -int copy_siginfo_to_user32(struct compat_siginfo __user *to, - const struct kernel_siginfo *from) -#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) -{ - return __copy_siginfo_to_user32(to, from, in_x32_syscall()); -} -int __copy_siginfo_to_user32(struct compat_siginfo __user *to, - const struct kernel_siginfo *from, bool x32_ABI) -#endif +/** + * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo + * @to: compat siginfo destination + * @from: kernel siginfo source + * + * Note: This function does not work properly for the SIGCHLD on x32, but + * fortunately it doesn't have to. The only valid callers for this function are + * copy_siginfo_to_user32, which is overriden for x32 and the coredump code. + * The latter does not care because SIGCHLD will never cause a coredump. + */ +void copy_siginfo_to_external32(struct compat_siginfo *to, + const struct kernel_siginfo *from) { - struct compat_siginfo new; - memset(&new, 0, sizeof(new)); + memset(to, 0, sizeof(*to)); - new.si_signo = from->si_signo; - new.si_errno = from->si_errno; - new.si_code = from->si_code; + to->si_signo = from->si_signo; + to->si_errno = from->si_errno; + to->si_code = from->si_code; switch(siginfo_layout(from->si_signo, from->si_code)) { case SIL_KILL: - new.si_pid = from->si_pid; - new.si_uid = from->si_uid; + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; break; case SIL_TIMER: - new.si_tid = from->si_tid; - new.si_overrun = from->si_overrun; - new.si_int = from->si_int; + to->si_tid = from->si_tid; + to->si_overrun = from->si_overrun; + to->si_int = from->si_int; break; case SIL_POLL: - new.si_band = from->si_band; - new.si_fd = from->si_fd; + to->si_band = from->si_band; + to->si_fd = from->si_fd; break; case SIL_FAULT: - new.si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - new.si_trapno = from->si_trapno; -#endif + to->si_addr = ptr_to_compat(from->si_addr); + break; + case SIL_FAULT_TRAPNO: + to->si_addr = ptr_to_compat(from->si_addr); + to->si_trapno = from->si_trapno; break; case SIL_FAULT_MCEERR: - new.si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - new.si_trapno = from->si_trapno; -#endif - new.si_addr_lsb = from->si_addr_lsb; + to->si_addr = ptr_to_compat(from->si_addr); + to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: - new.si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - new.si_trapno = from->si_trapno; -#endif - new.si_lower = ptr_to_compat(from->si_lower); - new.si_upper = ptr_to_compat(from->si_upper); + to->si_addr = ptr_to_compat(from->si_addr); + to->si_lower = ptr_to_compat(from->si_lower); + 
to->si_upper = ptr_to_compat(from->si_upper); break; case SIL_FAULT_PKUERR: - new.si_addr = ptr_to_compat(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - new.si_trapno = from->si_trapno; -#endif - new.si_pkey = from->si_pkey; + to->si_addr = ptr_to_compat(from->si_addr); + to->si_pkey = from->si_pkey; + break; + case SIL_FAULT_PERF_EVENT: + to->si_addr = ptr_to_compat(from->si_addr); + to->si_perf_data = from->si_perf_data; + to->si_perf_type = from->si_perf_type; + to->si_perf_flags = from->si_perf_flags; break; case SIL_CHLD: - new.si_pid = from->si_pid; - new.si_uid = from->si_uid; - new.si_status = from->si_status; -#ifdef CONFIG_X86_X32_ABI - if (x32_ABI) { - new._sifields._sigchld_x32._utime = from->si_utime; - new._sifields._sigchld_x32._stime = from->si_stime; - } else -#endif - { - new.si_utime = from->si_utime; - new.si_stime = from->si_stime; - } + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; + to->si_status = from->si_status; + to->si_utime = from->si_utime; + to->si_stime = from->si_stime; break; case SIL_RT: - new.si_pid = from->si_pid; - new.si_uid = from->si_uid; - new.si_int = from->si_int; + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; + to->si_int = from->si_int; break; case SIL_SYS: - new.si_call_addr = ptr_to_compat(from->si_call_addr); - new.si_syscall = from->si_syscall; - new.si_arch = from->si_arch; + to->si_call_addr = ptr_to_compat(from->si_call_addr); + to->si_syscall = from->si_syscall; + to->si_arch = from->si_arch; break; } +} +int __copy_siginfo_to_user32(struct compat_siginfo __user *to, + const struct kernel_siginfo *from) +{ + struct compat_siginfo new; + + copy_siginfo_to_external32(&new, from); if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) return -EFAULT; - return 0; } @@ -3157,32 +3657,30 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, break; case SIL_FAULT: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO + break; + case SIL_FAULT_TRAPNO: + to->si_addr = compat_ptr(from->si_addr); to->si_trapno = from->si_trapno; -#endif break; case SIL_FAULT_MCEERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_lower = compat_ptr(from->si_lower); to->si_upper = compat_ptr(from->si_upper); break; case SIL_FAULT_PKUERR: to->si_addr = compat_ptr(from->si_addr); -#ifdef __ARCH_SI_TRAPNO - to->si_trapno = from->si_trapno; -#endif to->si_pkey = from->si_pkey; break; + case SIL_FAULT_PERF_EVENT: + to->si_addr = compat_ptr(from->si_addr); + to->si_perf_data = from->si_perf_data; + to->si_perf_type = from->si_perf_type; + to->si_perf_flags = from->si_perf_flags; + break; case SIL_CHLD: to->si_pid = from->si_pid; to->si_uid = from->si_uid; @@ -3248,6 +3746,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, ktime_t *to = NULL, timeout = KTIME_MAX; struct task_struct *tsk = current; sigset_t mask = *which; + enum pid_type type; int sig, ret = 0; if (ts) { @@ -3264,7 +3763,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, signotset(&mask); spin_lock_irq(&tsk->sighand->siglock); - sig = dequeue_signal(tsk, &mask, info); + sig = dequeue_signal(&mask, info, &type); if (!sig && timeout) { /* * None ready, temporarily unblock those we're interested @@ -3277,13 +3776,13 @@ static int do_sigtimedwait(const sigset_t *which, 
kernel_siginfo_t *info, recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); - __set_current_state(TASK_INTERRUPTIBLE); - ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, - HRTIMER_MODE_REL); + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); + ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns, + HRTIMER_MODE_REL); spin_lock_irq(&tsk->sighand->siglock); __set_task_blocked(tsk, &tsk->real_blocked); sigemptyset(&tsk->real_blocked); - sig = dequeue_signal(tsk, &mask, info); + sig = dequeue_signal(&mask, info, &type); } spin_unlock_irq(&tsk->sighand->siglock); @@ -3397,7 +3896,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, } #ifdef CONFIG_COMPAT_32BIT_TIME -COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, +COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, struct compat_siginfo __user *, uinfo, struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) { @@ -3429,6 +3928,17 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, #endif #endif +static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info, + enum pid_type type) +{ + clear_siginfo(info); + info->si_signo = sig; + info->si_errno = 0; + info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER; + info->si_pid = task_tgid_vnr(current); + info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); +} + /** * sys_kill - send a signal to a process * @pid: the PID of the process @@ -3438,16 +3948,171 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct kernel_siginfo info; - clear_siginfo(&info); - info.si_signo = sig; - info.si_errno = 0; - info.si_code = SI_USER; - info.si_pid = task_tgid_vnr(current); - info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); + prepare_kill_siginfo(sig, &info, PIDTYPE_TGID); return kill_something_info(sig, &info, pid); } +/* + * Verify that the signaler and signalee either are in the same pid namespace + * or that the signaler's pid namespace is an ancestor of the signalee's pid + * namespace. + */ +static bool access_pidfd_pidns(struct pid *pid) +{ + struct pid_namespace *active = task_active_pid_ns(current); + struct pid_namespace *p = ns_of_pid(pid); + + for (;;) { + if (!p) + return false; + if (p == active) + break; + p = p->parent; + } + + return true; +} + +static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, + siginfo_t __user *info) +{ +#ifdef CONFIG_COMPAT + /* + * Avoid hooking up compat syscalls and instead handle necessary + * conversions here. Note, this is a stop-gap measure and should not be + * considered a generic solution. 
+ */ + if (in_compat_syscall()) + return copy_siginfo_from_user32( + kinfo, (struct compat_siginfo __user *)info); +#endif + return copy_siginfo_from_user(kinfo, info); +} + +static struct pid *pidfd_to_pid(const struct file *file) +{ + struct pid *pid; + + pid = pidfd_pid(file); + if (!IS_ERR(pid)) + return pid; + + return tgid_pidfd_to_pid(file); +} + +#define PIDFD_SEND_SIGNAL_FLAGS \ + (PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \ + PIDFD_SIGNAL_PROCESS_GROUP) + +static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type, + siginfo_t __user *info, unsigned int flags) +{ + kernel_siginfo_t kinfo; + + switch (flags) { + case PIDFD_SIGNAL_THREAD: + type = PIDTYPE_PID; + break; + case PIDFD_SIGNAL_THREAD_GROUP: + type = PIDTYPE_TGID; + break; + case PIDFD_SIGNAL_PROCESS_GROUP: + type = PIDTYPE_PGID; + break; + } + + if (info) { + int ret; + + ret = copy_siginfo_from_user_any(&kinfo, info); + if (unlikely(ret)) + return ret; + + if (unlikely(sig != kinfo.si_signo)) + return -EINVAL; + + /* Only allow sending arbitrary signals to yourself. */ + if ((task_pid(current) != pid || type > PIDTYPE_TGID) && + (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) + return -EPERM; + } else { + prepare_kill_siginfo(sig, &kinfo, type); + } + + if (type == PIDTYPE_PGID) + return kill_pgrp_info(sig, &kinfo, pid); + + return kill_pid_info_type(sig, &kinfo, pid, type); +} + +/** + * sys_pidfd_send_signal - Signal a process through a pidfd + * @pidfd: file descriptor of the process + * @sig: signal to send + * @info: signal info + * @flags: future flags + * + * Send the signal to the thread group or to the individual thread depending + * on PIDFD_THREAD. + * In the future extension to @flags may be used to override the default scope + * of @pidfd. + * + * Return: 0 on success, negative errno on failure + */ +SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, + siginfo_t __user *, info, unsigned int, flags) +{ + struct pid *pid; + enum pid_type type; + int ret; + + /* Enforce flags be set to 0 until we add an extension. */ + if (flags & ~PIDFD_SEND_SIGNAL_FLAGS) + return -EINVAL; + + /* Ensure that only a single signal scope determining flag is set. */ + if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1) + return -EINVAL; + + switch (pidfd) { + case PIDFD_SELF_THREAD: + pid = get_task_pid(current, PIDTYPE_PID); + type = PIDTYPE_PID; + break; + case PIDFD_SELF_THREAD_GROUP: + pid = get_task_pid(current, PIDTYPE_TGID); + type = PIDTYPE_TGID; + break; + default: { + CLASS(fd, f)(pidfd); + if (fd_empty(f)) + return -EBADF; + + /* Is this a pidfd? */ + pid = pidfd_to_pid(fd_file(f)); + if (IS_ERR(pid)) + return PTR_ERR(pid); + + if (!access_pidfd_pidns(pid)) + return -EINVAL; + + /* Infer scope from the type of pidfd. 
*/ + if (fd_file(f)->f_flags & PIDFD_THREAD) + type = PIDTYPE_PID; + else + type = PIDTYPE_TGID; + + return do_pidfd_send_signal(pid, sig, type, info, flags); + } + } + + ret = do_pidfd_send_signal(pid, sig, type, info, flags); + put_pid(pid); + + return ret; +} + static int do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) { @@ -3482,12 +4147,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig) { struct kernel_siginfo info; - clear_siginfo(&info); - info.si_signo = sig; - info.si_errno = 0; - info.si_code = SI_TKILL; - info.si_pid = task_tgid_vnr(current); - info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); + prepare_kill_siginfo(sig, &info, PIDTYPE_PID); return do_send_specific(tgid, pid, sig, &info); } @@ -3624,8 +4284,8 @@ void kernel_sigaction(int sig, __sighandler_t action) sigemptyset(&mask); sigaddset(&mask, sig); - flush_sigqueue_mask(&mask, &current->signal->shared_pending); - flush_sigqueue_mask(&mask, &current->pending); + flush_sigqueue_mask(current, &mask, &current->signal->shared_pending); + flush_sigqueue_mask(current, &mask, &current->pending); recalc_sigpending(); } spin_unlock_irq(&current->sighand->siglock); @@ -3649,12 +4309,34 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) k = &p->sighand->action[sig-1]; spin_lock_irq(&p->sighand->siglock); + if (k->sa.sa_flags & SA_IMMUTABLE) { + spin_unlock_irq(&p->sighand->siglock); + return -EINVAL; + } if (oact) *oact = *k; + /* + * Make sure that we never accidentally claim to support SA_UNSUPPORTED, + * e.g. by having an architecture use the bit in their uapi. + */ + BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED); + + /* + * Clear unknown flag bits in order to allow userspace to detect missing + * support for flag bits and to allow the kernel to use non-uapi bits + * internally. + */ + if (act) + act->sa.sa_flags &= UAPI_SA_FLAGS; + if (oact) + oact->sa.sa_flags &= UAPI_SA_FLAGS; + sigaction_compat_abi(act, oact); if (act) { + bool was_ignored = k->sa.sa_handler == SIG_IGN; + sigdelsetmask(&act->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); *k = *act; @@ -3672,9 +4354,11 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) if (sig_handler_ignored(sig_handler(p, sig), sig)) { sigemptyset(&mask); sigaddset(&mask, sig); - flush_sigqueue_mask(&mask, &p->signal->shared_pending); + flush_sigqueue_mask(p, &mask, &p->signal->shared_pending); for_each_thread(p, t) - flush_sigqueue_mask(&mask, &t->pending); + flush_sigqueue_mask(p, &mask, &t->pending); + } else if (was_ignored) { + posixtimer_sig_unignore(p, sig); } } @@ -3682,11 +4366,29 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) return 0; } +#ifdef CONFIG_DYNAMIC_SIGFRAME +static inline void sigaltstack_lock(void) + __acquires(&current->sighand->siglock) +{ + spin_lock_irq(&current->sighand->siglock); +} + +static inline void sigaltstack_unlock(void) + __releases(&current->sighand->siglock) +{ + spin_unlock_irq(&current->sighand->siglock); +} +#else +static inline void sigaltstack_lock(void) { } +static inline void sigaltstack_unlock(void) { } +#endif + static int do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, size_t min_ss_size) { struct task_struct *t = current; + int ret = 0; if (oss) { memset(oss, 0, sizeof(stack_t)); @@ -3710,19 +4412,33 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, ss_mode != 0)) return -EINVAL; + /* + * Return before taking any locks if no actual + * sigaltstack changes were requested. 
+ */ + if (t->sas_ss_sp == (unsigned long)ss_sp && + t->sas_ss_size == ss_size && + t->sas_ss_flags == ss_flags) + return 0; + + sigaltstack_lock(); if (ss_mode == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { if (unlikely(ss_size < min_ss_size)) - return -ENOMEM; + ret = -ENOMEM; + if (!sigaltstack_size_valid(ss_size)) + ret = -ENOMEM; } - - t->sas_ss_sp = (unsigned long) ss_sp; - t->sas_ss_size = ss_size; - t->sas_ss_flags = ss_flags; + if (!ret) { + t->sas_ss_sp = (unsigned long) ss_sp; + t->sas_ss_size = ss_size; + t->sas_ss_flags = ss_flags; + } + sigaltstack_unlock(); } - return 0; + return ret; } SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) @@ -3756,11 +4472,7 @@ int __save_altstack(stack_t __user *uss, unsigned long sp) int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | __put_user(t->sas_ss_flags, &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); - if (err) - return err; - if (t->sas_ss_flags & SS_AUTODISARM) - sas_ss_reset(t); - return 0; + return err; } #ifdef CONFIG_COMPAT @@ -3815,11 +4527,7 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) &uss->ss_sp) | __put_user(t->sas_ss_flags, &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); - if (err) - return err; - if (t->sas_ss_flags & SS_AUTODISARM) - sas_ss_reset(t); - return 0; + return err; } #endif @@ -4220,10 +4928,14 @@ static inline void siginfo_buildtime_checks(void) /* sigfault */ CHECK_OFFSET(si_addr); + CHECK_OFFSET(si_trapno); CHECK_OFFSET(si_addr_lsb); CHECK_OFFSET(si_lower); CHECK_OFFSET(si_upper); CHECK_OFFSET(si_pkey); + CHECK_OFFSET(si_perf_data); + CHECK_OFFSET(si_perf_type); + CHECK_OFFSET(si_perf_flags); /* sigpoll */ CHECK_OFFSET(si_band); @@ -4234,13 +4946,67 @@ static inline void siginfo_buildtime_checks(void) CHECK_OFFSET(si_syscall); CHECK_OFFSET(si_arch); #undef CHECK_OFFSET + + /* usb asyncio */ + BUILD_BUG_ON(offsetof(struct siginfo, si_pid) != + offsetof(struct siginfo, si_addr)); + if (sizeof(int) == sizeof(void __user *)) { + BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != + sizeof(void __user *)); + } else { + BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + + sizeof_field(struct siginfo, si_uid)) != + sizeof(void __user *)); + BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != + offsetof(struct siginfo, si_uid)); + } +#ifdef CONFIG_COMPAT + BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != + offsetof(struct compat_siginfo, si_addr)); + BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != + sizeof(compat_uptr_t)); + BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != + sizeof_field(struct siginfo, si_pid)); +#endif +} + +#if defined(CONFIG_SYSCTL) +static const struct ctl_table signal_debug_table[] = { +#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE + { + .procname = "exception-trace", + .data = &show_unhandled_signals, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, +#endif +}; + +static const struct ctl_table signal_table[] = { + { + .procname = "print-fatal-signals", + .data = &print_fatal_signals, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +}; + +static int __init init_signal_sysctls(void) +{ + register_sysctl_init("debug", signal_debug_table); + register_sysctl_init("kernel", signal_table); + return 0; } +early_initcall(init_signal_sysctls); +#endif /* CONFIG_SYSCTL */ void __init signals_init(void) { siginfo_buildtime_checks(); - sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); + sigqueue_cachep = 
KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT); } #ifdef CONFIG_KGDB_KDB @@ -4263,7 +5029,7 @@ void kdb_send_sig(struct task_struct *t, int sig) } new_t = kdb_prev_t != t; kdb_prev_t = t; - if (t->state != TASK_RUNNING && new_t) { + if (!task_is_running(t) && new_t) { spin_unlock(&t->sighand->siglock); kdb_printf("Process is not RUNNING, sending a signal from " "kdb risks deadlock\n" @@ -4273,7 +5039,7 @@ void kdb_send_sig(struct task_struct *t, int sig) "the deadlock.\n"); return; } - ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); + ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); spin_unlock(&t->sighand->siglock); if (ret) kdb_printf("Fail to deliver Signal %d to process %d.\n",
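Illustrative aside, not part of the diff above: a minimal userspace sketch of how the sys_pidfd_send_signal() path shown in the earlier hunks is typically driven. The sys_pidfd_open()/sys_pidfd_send_signal() wrapper names and the demo program are assumptions for illustration only; what is relied on is the raw SYS_pidfd_open/SYS_pidfd_send_signal syscall numbers and the semantics visible in the diff, i.e. info == NULL makes the kernel build a SI_USER siginfo via prepare_kill_siginfo(), and flags == 0 lets the signal scope be inferred from the pidfd (whole thread group for a regular pidfd, a single thread for a PIDFD_THREAD pidfd).

/* Hypothetical sketch: send SIGTERM to a process through a pidfd. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrappers; a sufficiently new libc may already provide these. */
static int sys_pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(SYS_pidfd_open, pid, flags);
}

static int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
				 unsigned int flags)
{
	return syscall(SYS_pidfd_send_signal, pidfd, sig, info, flags);
}

int main(int argc, char **argv)
{
	int pidfd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return EXIT_FAILURE;
	}

	pidfd = sys_pidfd_open((pid_t)atoi(argv[1]), 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return EXIT_FAILURE;
	}

	/*
	 * info == NULL: the kernel fills in a SI_USER siginfo itself.
	 * flags == 0: the signal scope is inferred from the pidfd type.
	 */
	if (sys_pidfd_send_signal(pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		close(pidfd);
		return EXIT_FAILURE;
	}

	close(pidfd);
	return EXIT_SUCCESS;
}

Passing one of the PIDFD_SIGNAL_* flags handled by do_pidfd_send_signal() in the diff would override that inferred scope explicitly (single thread, thread group, or process group).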
