Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/ring_buffer.c    |  2
-rw-r--r--  kernel/livepatch/patch.c       |  8
-rw-r--r--  kernel/livepatch/transition.c  | 36
-rw-r--r--  kernel/signal.c                | 20
4 files changed, 52 insertions, 14 deletions
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 2831480c63a2..ee97196bb151 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	int ret = -ENOMEM, max_order = 0;
 
 	if (!has_aux(event))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
 		/*
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f8269036bf0b..52c4e907c14b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	ops = container_of(fops, struct klp_ops, fops);
 
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
 
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc64aa4b..b004a1fb6032 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
 /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We allow to patch also functions where RCU is not watching,
+ * e.g. before user_exit(). We can not rely on the RCU infrastructure
+ * to do the synchronization. Instead hard force the sched synchronization.
+ *
+ * This approach allows to use RCU functions for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+	schedule_on_each_cpu(klp_sync);
+}
+
+/*
  * The transition to the target patch state is complete. Clean up the data
  * structures.
  */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
 		 * func->transition gets cleared, the handler may choose a
 		 * removed function.
 		 */
-		synchronize_rcu();
+		klp_synchronize_transition();
 	}
 
 	if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
-		synchronize_rcu();
+		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	/*
 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
 		task->patch_state = READ_ONCE(klp_target_state);
 
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
 	/* Let any remaining calls to klp_update_patch_state() complete */
-	synchronize_rcu();
+	klp_synchronize_transition();
 
 	klp_start_transition();
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index ca92bcfeb322..45b4c1ffe14e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 	return !tsk->ptrace;
 }
 
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+			   bool *resched_timer)
 {
 	struct sigqueue *q, *first = NULL;
 
@@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 still_pending:
 		list_del_init(&first->list);
 		copy_siginfo(info, &first->info);
+
+		*resched_timer =
+			(first->flags & SIGQUEUE_PREALLOC) &&
+			(info->si_code == SI_TIMER) &&
+			(info->si_sys_private);
+
 		__sigqueue_free(first);
 	} else {
 		/*
@@ -548,12 +555,12 @@ still_pending:
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
-			siginfo_t *info)
+			siginfo_t *info, bool *resched_timer)
 {
 	int sig = next_signal(pending, mask);
 
 	if (sig)
-		collect_signal(sig, pending, info);
+		collect_signal(sig, pending, info, resched_timer);
 
 	return sig;
 }
@@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
+	bool resched_timer = false;
 	int signr;
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
-	signr = __dequeue_signal(&tsk->pending, mask, info);
+	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
-					 mask, info);
+					 mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
 		/*
 		 * itimer signal ?
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	}
 #ifdef CONFIG_POSIX_TIMERS
-	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+	if (resched_timer) {
 		/*
 		 * Release the siglock to ensure proper locking order
 		 * of timer locks outside of siglocks. Note, we leave
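
The livepatch hunks above swap rcu_read_lock()/synchronize_rcu() for preempt_disable_notrace() plus a schedule_on_each_cpu()-based barrier, so that code running where RCU is not watching is still covered. The stand-alone kernel-module sketch below is not part of the patch; the module name, functions, and messages are invented, and it only illustrates why running an empty work item on every CPU acts as a hard scheduling barrier: schedule_on_each_cpu() returns only after each online CPU has context-switched into the stub, so no CPU can still be inside a preempt-disabled region that was entered before the call.

/* sync_demo.c -- hypothetical illustration, not from the patch above. */
#include <linux/module.h>
#include <linux/workqueue.h>

/* Empty work function: only the per-CPU context switch matters. */
static void sync_stub(struct work_struct *work)
{
}

static int __init sync_demo_init(void)
{
	int ret;

	pr_info("sync_demo: forcing a scheduling point on every CPU\n");

	/*
	 * Same primitive as klp_synchronize_transition() in the patch:
	 * returns only after the stub has run (and therefore a reschedule
	 * has happened) on every online CPU, including CPUs that were idle
	 * or executing in userspace.
	 */
	ret = schedule_on_each_cpu(sync_stub);
	if (ret)
		return ret;

	pr_info("sync_demo: all CPUs have passed through the scheduler\n");
	return 0;
}

static void __exit sync_demo_exit(void)
{
}

module_init(sync_demo_init);
module_exit(sync_demo_exit);
MODULE_LICENSE("GPL");

Built as an ordinary out-of-tree module (obj-m += sync_demo.o), loading it prints the two messages with the cross-CPU barrier in between.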

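For the kernel/signal.c hunks, the new resched_timer flag is computed in collect_signal() while the preallocated sigqueue entry is still at hand, instead of being re-derived later from si_code in dequeue_signal(). The userspace program below is only a hypothetical way to exercise that path (file name and output format are made up): a periodic POSIX timer delivers SIGRTMIN, and each sigwaitinfo() dequeue of that SI_TIMER signal is the point where the kernel decides whether the timer must be re-armed.

/* timer_demo.c -- hypothetical userspace test, not from the patch above.
 * Build: gcc timer_demo.c -o timer_demo -lrt
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = { 0 };
	struct itimerspec its = { 0 };
	siginfo_t info;
	sigset_t set;
	timer_t timerid;
	int i;

	/* Block SIGRTMIN so it can be dequeued synchronously via sigwaitinfo(). */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid)) {
		perror("timer_create");
		return EXIT_FAILURE;
	}

	/* 100 ms periodic timer: each expiry queues one SI_TIMER signal. */
	its.it_value.tv_nsec = 100 * 1000 * 1000;
	its.it_interval = its.it_value;
	timer_settime(timerid, 0, &its, NULL);

	for (i = 0; i < 5; i++) {
		if (sigwaitinfo(&set, &info) < 0) {
			perror("sigwaitinfo");
			return EXIT_FAILURE;
		}
		printf("expiry %d: si_code=%s overrun=%d\n", i,
		       info.si_code == SI_TIMER ? "SI_TIMER" : "other",
		       timer_getoverrun(timerid));
	}

	timer_delete(timerid);
	return EXIT_SUCCESS;
}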