Diffstat (limited to 'kernel/livepatch')
-rw-r--r--   kernel/livepatch/core.c       |  10
-rw-r--r--   kernel/livepatch/transition.c | 122
2 files changed, 109 insertions, 23 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 4bd2d5e10f20..61328328c474 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -33,6 +33,7 @@
  *
  * - klp_ftrace_handler()
  * - klp_update_patch_state()
+ * - __klp_sched_try_switch()
  */
 DEFINE_MUTEX(klp_mutex);
 
@@ -142,8 +143,7 @@ static int klp_match_callback(void *data, unsigned long addr)
 	return 0;
 }
 
-static int klp_find_callback(void *data, const char *name,
-			     struct module *mod, unsigned long addr)
+static int klp_find_callback(void *data, const char *name, unsigned long addr)
 {
 	struct klp_find_arg *args = data;
 
@@ -596,7 +596,7 @@ static void klp_kobj_release_patch(struct kobject *kobj)
 	complete(&patch->finish);
 }
 
-static struct kobj_type klp_ktype_patch = {
+static const struct kobj_type klp_ktype_patch = {
 	.release = klp_kobj_release_patch,
 	.sysfs_ops = &kobj_sysfs_ops,
 	.default_groups = klp_patch_groups,
@@ -612,7 +612,7 @@ static void klp_kobj_release_object(struct kobject *kobj)
 		klp_free_object_dynamic(obj);
 }
 
-static struct kobj_type klp_ktype_object = {
+static const struct kobj_type klp_ktype_object = {
 	.release = klp_kobj_release_object,
 	.sysfs_ops = &kobj_sysfs_ops,
 	.default_groups = klp_object_groups,
@@ -628,7 +628,7 @@ static void klp_kobj_release_func(struct kobject *kobj)
 		klp_free_func_nop(func);
 }
 
-static struct kobj_type klp_ktype_func = {
+static const struct kobj_type klp_ktype_func = {
 	.release = klp_kobj_release_func,
 	.sysfs_ops = &kobj_sysfs_ops,
 };
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index f1b25ec581e0..e9fd83a02228 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -9,11 +9,14 @@
 
 #include <linux/cpu.h>
 #include <linux/stacktrace.h>
+#include <linux/static_call.h>
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
 
 #define MAX_STACK_ENTRIES  100
+DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
+
 #define STACK_ERR_BUF_SIZE 128
 
 #define SIGNALS_TIMEOUT 15
@@ -25,6 +28,25 @@ static int klp_target_state = KLP_UNDEFINED;
 static unsigned int klp_signals_cnt;
 
 /*
+ * When a livepatch is in progress, enable klp stack checking in
+ * cond_resched().  This helps CPU-bound kthreads get patched.
+ */
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+#define klp_cond_resched_enable() sched_dynamic_klp_enable()
+#define klp_cond_resched_disable() sched_dynamic_klp_disable()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+EXPORT_SYMBOL(klp_sched_try_switch_key);
+
+#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
+#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+/*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
  */
@@ -172,8 +194,8 @@ void klp_update_patch_state(struct task_struct *task)
 	 * barrier (smp_rmb) for two cases:
 	 *
 	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
-	 *    klp_target_state read.  The corresponding write barrier is in
-	 *    klp_init_transition().
+	 *    klp_target_state read.  The corresponding write barriers are in
+	 *    klp_init_transition() and klp_reverse_transition().
 	 *
 	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
 	 *    of func->transition, if klp_ftrace_handler() is called later on
@@ -240,12 +262,15 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
  */
 static int klp_check_stack(struct task_struct *task, const char **oldname)
 {
-	static unsigned long entries[MAX_STACK_ENTRIES];
+	unsigned long *entries = this_cpu_ptr(klp_stack_entries);
 	struct klp_object *obj;
 	struct klp_func *func;
 	int ret, nr_entries;
 
-	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
+	/* Protect 'klp_stack_entries' */
+	lockdep_assert_preemption_disabled();
+
+	ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
 	if (ret < 0)
 		return -EINVAL;
 	nr_entries = ret;
@@ -307,7 +332,11 @@ static bool klp_try_switch_task(struct task_struct *task)
 	 * functions.  If all goes well, switch the task to the target patch
 	 * state.
 	 */
-	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
+	if (task == current)
+		ret = klp_check_and_switch_task(current, &old_name);
+	else
+		ret = task_call_func(task, klp_check_and_switch_task, &old_name);
+
 	switch (ret) {
 	case 0:		/* success */
 		break;
@@ -334,6 +363,44 @@ static bool klp_try_switch_task(struct task_struct *task)
 	return !ret;
 }
 
+void __klp_sched_try_switch(void)
+{
+	if (likely(!klp_patch_pending(current)))
+		return;
+
+	/*
+	 * This function is called from cond_resched() which is called in many
+	 * places throughout the kernel.  Using the klp_mutex here might
+	 * deadlock.
+	 *
+	 * Instead, disable preemption to prevent racing with other callers of
+	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
+	 * able to switch this task while it's running.
+	 */
+	preempt_disable();
+
+	/*
+	 * Make sure current didn't get patched between the above check and
+	 * preempt_disable().
+	 */
+	if (unlikely(!klp_patch_pending(current)))
+		goto out;
+
+	/*
+	 * Enforce the order of the TIF_PATCH_PENDING read above and the
+	 * klp_target_state read in klp_try_switch_task().  The corresponding
+	 * write barriers are in klp_init_transition() and
+	 * klp_reverse_transition().
+	 */
+	smp_rmb();
+
+	klp_try_switch_task(current);
+
+out:
+	preempt_enable();
+}
+EXPORT_SYMBOL(__klp_sched_try_switch);
+
 /*
  * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
  * Kthreads with TIF_PATCH_PENDING set are woken up.
@@ -440,7 +507,8 @@ void klp_try_complete_transition(void)
 		return;
 	}
 
-	/* we're done, now cleanup the data structures */
+	/* Done!  Now cleanup the data structures. */
+	klp_cond_resched_disable();
 	patch = klp_transition_patch;
 	klp_complete_transition();
 
@@ -492,6 +560,8 @@ void klp_start_transition(void)
 			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
 	}
 
+	klp_cond_resched_enable();
+
 	klp_signals_cnt = 0;
 }
 
@@ -547,8 +617,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
 	 *
 	 * Also enforce the order of the klp_target_state write and future
-	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
-	 * set a task->patch_state to KLP_UNDEFINED.
+	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
+	 * __klp_sched_try_switch() don't set a task->patch_state to
+	 * KLP_UNDEFINED.
 	 */
 	smp_wmb();
 
@@ -584,14 +655,10 @@ void klp_reverse_transition(void)
 		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
 						   "unpatching to patching");
 
-	klp_transition_patch->enabled = !klp_transition_patch->enabled;
-
-	klp_target_state = !klp_target_state;
-
 	/*
 	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
-	 * klp_update_patch_state() running in parallel with
-	 * klp_start_transition().
+	 * klp_update_patch_state() or __klp_sched_try_switch() running in
+	 * parallel with the reverse transition.
 	 */
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task)
@@ -601,9 +668,28 @@ void klp_reverse_transition(void)
 	for_each_possible_cpu(cpu)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
-	/* Let any remaining calls to klp_update_patch_state() complete */
+	/*
+	 * Make sure all existing invocations of klp_update_patch_state() and
+	 * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
+	 * starting the reverse transition.
+	 */
 	klp_synchronize_transition();
 
+	/*
+	 * All patching has stopped, now re-initialize the global variables to
+	 * prepare for the reverse transition.
+	 */
+	klp_transition_patch->enabled = !klp_transition_patch->enabled;
+	klp_target_state = !klp_target_state;
+
+	/*
+	 * Enforce the order of the klp_target_state write and the
+	 * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
+	 * klp_update_patch_state() and __klp_sched_try_switch() don't set
+	 * task->patch_state to the wrong value.
+	 */
+	smp_wmb();
+
 	klp_start_transition();
 }
 
@@ -617,9 +703,9 @@ void klp_copy_process(struct task_struct *child)
 	 * the task flag up to date with the parent here.
	 *
	 * The operation is serialized against all klp_*_transition()
-	 * operations by the tasklist_lock. The only exception is
-	 * klp_update_patch_state(current), but we cannot race with
-	 * that because we are current.
+	 * operations by the tasklist_lock. The only exceptions are
+	 * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
+	 * cannot race with them because we are current.
	 */
	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
