Diffstat (limited to 'kernel/livepatch')
-rw-r--r--  kernel/livepatch/core.c        |  58
-rw-r--r--  kernel/livepatch/patch.c       |   4
-rw-r--r--  kernel/livepatch/transition.c  | 103
3 files changed, 90 insertions(+), 75 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ecbc9b6aba3a..0e73fac55f8e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -59,7 +59,7 @@ static void klp_find_object_module(struct klp_object *obj)
if (!klp_is_module(obj))
return;
- rcu_read_lock_sched();
+ guard(rcu)();
/*
* We do not want to block removal of patched modules and therefore
* we do not take a reference here. The patches are removed by
@@ -75,8 +75,6 @@ static void klp_find_object_module(struct klp_object *obj)
*/
if (mod && mod->klp_alive)
obj->mod = mod;
-
- rcu_read_unlock_sched();
}
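
A note on the guard() conversion above: since the RCU flavors were unified, an rcu_read_lock_sched() reader can be replaced by a plain RCU reader, and guard(rcu)() from <linux/cleanup.h> enters one that ends automatically when the scope closes, so no explicit unlock is needed on any return path. A minimal sketch of the pattern, assuming a made-up config structure purely for illustration:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>

/* Illustrative only: 'example_cfg' and its type are assumptions. */
struct example_cfg { int val; };
static struct example_cfg __rcu *example_cfg;

static int example_cfg_val(void)
{
	/* Enters an RCU read-side section; exits it automatically at
	 * the closing brace, even on an early return. */
	guard(rcu)();

	struct example_cfg *c = rcu_dereference(example_cfg);

	return c ? c->val : -ENOENT;
}
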
static bool klp_initialized(void)
@@ -346,6 +344,8 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
* /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition
* /sys/kernel/livepatch/<patch>/force
+ * /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/stack_order
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/patched
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -401,7 +401,7 @@ static ssize_t enabled_show(struct kobject *kobj,
struct klp_patch *patch;
patch = container_of(kobj, struct klp_patch, kobj);
- return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
+ return sysfs_emit(buf, "%d\n", patch->enabled);
}
static ssize_t transition_show(struct kobject *kobj,
@@ -410,8 +410,7 @@ static ssize_t transition_show(struct kobject *kobj,
struct klp_patch *patch;
patch = container_of(kobj, struct klp_patch, kobj);
- return snprintf(buf, PAGE_SIZE-1, "%d\n",
- patch == klp_transition_patch);
+ return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
}
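
sysfs_emit() is the preferred way to fill a sysfs show() buffer: it knows the buffer is a full page, warns on a misaligned pointer, and bounds the output to PAGE_SIZE, so callers stop open-coding snprintf(buf, PAGE_SIZE - 1, ...). A sketch of a read-only attribute using it; the attribute name and value are made up:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	/* Returns the number of bytes written, capped at PAGE_SIZE,
	 * which is exactly what a show() callback must return. */
	return sysfs_emit(buf, "%d\n", 42);
}
static struct kobj_attribute example_attr = __ATTR_RO(example);
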
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -443,13 +442,47 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
+static ssize_t replace_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_patch *patch;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ return sysfs_emit(buf, "%d\n", patch->replace);
+}
+
+static ssize_t stack_order_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_patch *patch, *this_patch;
+ int stack_order = 0;
+
+ this_patch = container_of(kobj, struct klp_patch, kobj);
+
+ mutex_lock(&klp_mutex);
+
+ klp_for_each_patch(patch) {
+ stack_order++;
+ if (patch == this_patch)
+ break;
+ }
+
+ mutex_unlock(&klp_mutex);
+
+ return sysfs_emit(buf, "%d\n", stack_order);
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
+static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
+static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
&transition_kobj_attr.attr,
&force_kobj_attr.attr,
+ &replace_kobj_attr.attr,
+ &stack_order_kobj_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(klp_patch);
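
With the two new read-only attributes registered above, userspace can query them like any other sysfs file; stack_order counts from 1 for the earliest patch on the internal list. A hedged userspace sketch, where the patch name "livepatch_sample" is an assumption:

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f;

	/* Hypothetical patch name; substitute a loaded patch. */
	f = fopen("/sys/kernel/livepatch/livepatch_sample/stack_order", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("stack_order: %s", buf);
		fclose(f);
	}
	return 0;
}
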
@@ -566,9 +599,12 @@ static int klp_add_object_nops(struct klp_patch *patch,
}
/*
- * Add 'nop' functions which simply return to the caller to run
- * the original function. The 'nop' functions are added to a
- * patch to facilitate a 'replace' mode.
+ * Add 'nop' functions which simply return to the caller to run the
+ * original function.
+ *
+ * They are added only when the atomic replace mode is used and only for
+ * functions which are currently livepatched but are no longer included
+ * in the new livepatch.
*/
static int klp_add_nops(struct klp_patch *patch)
{
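
The handler side of the nop mechanism is not in this hunk, but the idea is that a nop entry keeps the func_stack consistent during an atomic replace while declining to redirect. A hedged sketch of that check, modeled on klp_ftrace_handler() in patch.c; the helper name follows current mainline and should be treated as illustrative:

	/* Sketch: a nop func leaves the instruction pointer alone so
	 * the original function runs; a real func redirects to the
	 * new implementation. */
	if (func->nop)
		goto unlock;

	ftrace_regs_set_instruction_pointer(fregs,
					    (unsigned long)func->new_func);
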
@@ -973,7 +1009,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
if (klp_transition_patch)
return -EBUSY;
- klp_init_transition(patch, KLP_UNPATCHED);
+ klp_init_transition(patch, KLP_TRANSITION_UNPATCHED);
klp_for_each_object(patch, obj)
if (obj->patched)
@@ -1008,7 +1044,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
pr_notice("enabling patch '%s'\n", patch->mod->name);
- klp_init_transition(patch, KLP_PATCHED);
+ klp_init_transition(patch, KLP_TRANSITION_PATCHED);
/*
* Enforce the order of the func->transition writes in
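
For reference, the KLP_TRANSITION_* rename used throughout this series maps one-to-one onto the old constants. The values below are assumed from the previous definitions and must keep their numeric identities, since code such as 'int initial_state = !state;' in klp_init_transition() relies on UNPATCHED being 0 and PATCHED being 1:

/* Assumed post-rename definitions (include/linux/livepatch.h). */
#define KLP_TRANSITION_IDLE		-1	/* was KLP_UNDEFINED */
#define KLP_TRANSITION_UNPATCHED	 0	/* was KLP_UNPATCHED */
#define KLP_TRANSITION_PATCHED		 1	/* was KLP_PATCHED */
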
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 4152c71507e2..90408500e5a3 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -95,9 +95,9 @@ static void notrace klp_ftrace_handler(unsigned long ip,
patch_state = current->patch_state;
- WARN_ON_ONCE(patch_state == KLP_UNDEFINED);
+ WARN_ON_ONCE(patch_state == KLP_TRANSITION_IDLE);
- if (patch_state == KLP_UNPATCHED) {
+ if (patch_state == KLP_TRANSITION_UNPATCHED) {
/*
* Use the previously patched version of the function.
* If no previous patches exist, continue with the
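
The fallback that comment describes walks one entry down ops->func_stack; a hedged sketch of the lookup, patterned on the surrounding handler code:

	/* Step to the previously patched version below 'func'; if
	 * 'func' was the only entry on the stack, fall through and
	 * let the original function run. */
	func = list_entry_rcu(func->stack_node.next,
			      struct klp_func, stack_node);
	if (&func->stack_node == &ops->func_stack)
		goto unlock;
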
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index e54c3d60a904..2351a19ac2a9 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -23,28 +23,19 @@ static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
struct klp_patch *klp_transition_patch;
-static int klp_target_state = KLP_UNDEFINED;
+static int klp_target_state = KLP_TRANSITION_IDLE;
static unsigned int klp_signals_cnt;
/*
* When a livepatch is in progress, enable klp stack checking in
- * cond_resched(). This helps CPU-bound kthreads get patched.
+ * schedule(). This helps CPU-bound kthreads get patched.
*/
-#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
-#define klp_cond_resched_enable() sched_dynamic_klp_enable()
-#define klp_cond_resched_disable() sched_dynamic_klp_disable()
-
-#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
-EXPORT_SYMBOL(klp_sched_try_switch_key);
-#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
-#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
-
-#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+#define klp_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
+#define klp_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
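
With the PREEMPT_DYNAMIC special case removed, the static key is the only gate for the scheduler hook. A hedged sketch of the call-site wrapper; the real helper and its location (something like include/linux/livepatch_sched.h) are assumptions, and the real wrapper may do more:

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
extern void __klp_sched_try_switch(void);

static __always_inline void klp_sched_try_switch(void)
{
	/* Nearly free when no transition is in progress: the static
	 * branch is a NOP until klp_resched_enable() flips the key. */
	if (static_branch_unlikely(&klp_sched_try_switch_key))
		__klp_sched_try_switch();
}
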
/*
* This work can be performed periodically to finish patching or unpatching any
@@ -96,16 +87,16 @@ static void klp_complete_transition(void)
pr_debug("'%s': completing %s transition\n",
klp_transition_patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+ klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
- if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
+ if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) {
klp_unpatch_replaced_patches(klp_transition_patch);
klp_discard_nops(klp_transition_patch);
}
- if (klp_target_state == KLP_UNPATCHED) {
+ if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
/*
- * All tasks have transitioned to KLP_UNPATCHED so we can now
+ * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now
* remove the new functions from the func_stack.
*/
klp_unpatch_objects(klp_transition_patch);
@@ -123,36 +114,36 @@ static void klp_complete_transition(void)
klp_for_each_func(obj, func)
func->transition = false;
- /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
- if (klp_target_state == KLP_PATCHED)
+ /* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
+ if (klp_target_state == KLP_TRANSITION_PATCHED)
klp_synchronize_transition();
read_lock(&tasklist_lock);
for_each_process_thread(g, task) {
WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
- task->patch_state = KLP_UNDEFINED;
+ task->patch_state = KLP_TRANSITION_IDLE;
}
read_unlock(&tasklist_lock);
for_each_possible_cpu(cpu) {
task = idle_task(cpu);
WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
- task->patch_state = KLP_UNDEFINED;
+ task->patch_state = KLP_TRANSITION_IDLE;
}
klp_for_each_object(klp_transition_patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
- if (klp_target_state == KLP_PATCHED)
+ if (klp_target_state == KLP_TRANSITION_PATCHED)
klp_post_patch_callback(obj);
- else if (klp_target_state == KLP_UNPATCHED)
+ else if (klp_target_state == KLP_TRANSITION_UNPATCHED)
klp_post_unpatch_callback(obj);
}
pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+ klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
- klp_target_state = KLP_UNDEFINED;
+ klp_target_state = KLP_TRANSITION_IDLE;
klp_transition_patch = NULL;
}
@@ -164,13 +155,13 @@ static void klp_complete_transition(void)
*/
void klp_cancel_transition(void)
{
- if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
+ if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED))
return;
pr_debug("'%s': canceling patching transition, going to unpatch\n",
klp_transition_patch->mod->name);
- klp_target_state = KLP_UNPATCHED;
+ klp_target_state = KLP_TRANSITION_UNPATCHED;
klp_complete_transition();
}
@@ -218,7 +209,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
struct klp_ops *ops;
int i;
- if (klp_target_state == KLP_UNPATCHED) {
+ if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
/*
* Check for the to-be-unpatched function
* (the func itself).
@@ -365,26 +356,18 @@ static bool klp_try_switch_task(struct task_struct *task)
void __klp_sched_try_switch(void)
{
- if (likely(!klp_patch_pending(current)))
- return;
-
/*
- * This function is called from cond_resched() which is called in many
- * places throughout the kernel. Using the klp_mutex here might
- * deadlock.
- *
- * Instead, disable preemption to prevent racing with other callers of
- * klp_try_switch_task(). Thanks to task_call_func() they won't be
- * able to switch this task while it's running.
+ * This function is called from __schedule() while a context switch is
+ * about to happen. Preemption is already disabled and klp_mutex
+ * can't be acquired.
+ * Disabled preemption is used to prevent racing with other callers of
+ * klp_try_switch_task(). Thanks to task_call_func() they won't be
+ * able to switch to this task while it's running.
*/
- preempt_disable();
+ lockdep_assert_preemption_disabled();
- /*
- * Make sure current didn't get patched between the above check and
- * preempt_disable().
- */
- if (unlikely(!klp_patch_pending(current)))
- goto out;
+ if (likely(!klp_patch_pending(current)))
+ return;
/*
* Enforce the order of the TIF_PATCH_PENDING read above and the
@@ -395,11 +378,7 @@ void __klp_sched_try_switch(void)
smp_rmb();
klp_try_switch_task(current);
-
-out:
- preempt_enable();
}
-EXPORT_SYMBOL(__klp_sched_try_switch);
/*
* Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
@@ -455,7 +434,7 @@ void klp_try_complete_transition(void)
struct klp_patch *patch;
bool complete = true;
- WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
+ WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);
/*
* Try to switch the tasks to the target patch state by walking their
@@ -508,7 +487,7 @@ void klp_try_complete_transition(void)
}
/* Done! Now cleanup the data structures. */
- klp_cond_resched_disable();
+ klp_resched_disable();
patch = klp_transition_patch;
klp_complete_transition();
@@ -532,11 +511,11 @@ void klp_start_transition(void)
struct task_struct *g, *task;
unsigned int cpu;
- WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
+ WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);
pr_notice("'%s': starting %s transition\n",
klp_transition_patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+ klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
/*
* Mark all normal tasks as needing a patch state update. They'll
@@ -560,7 +539,7 @@ void klp_start_transition(void)
set_tsk_thread_flag(task, TIF_PATCH_PENDING);
}
- klp_cond_resched_enable();
+ klp_resched_enable();
klp_signals_cnt = 0;
}
@@ -578,7 +557,7 @@ void klp_init_transition(struct klp_patch *patch, int state)
struct klp_func *func;
int initial_state = !state;
- WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
+ WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE);
klp_transition_patch = patch;
@@ -589,7 +568,7 @@ void klp_init_transition(struct klp_patch *patch, int state)
klp_target_state = state;
pr_debug("'%s': initializing %s transition\n", patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+ klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
/*
* Initialize all tasks to the initial patch state to prepare them for
@@ -597,7 +576,7 @@ void klp_init_transition(struct klp_patch *patch, int state)
*/
read_lock(&tasklist_lock);
for_each_process_thread(g, task) {
- WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
+ WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
task->patch_state = initial_state;
}
read_unlock(&tasklist_lock);
@@ -607,19 +586,19 @@ void klp_init_transition(struct klp_patch *patch, int state)
*/
for_each_possible_cpu(cpu) {
task = idle_task(cpu);
- WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
+ WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
task->patch_state = initial_state;
}
/*
* Enforce the order of the task->patch_state initializations and the
* func->transition updates to ensure that klp_ftrace_handler() doesn't
- * see a func in transition with a task->patch_state of KLP_UNDEFINED.
+ * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE.
*
* Also enforce the order of the klp_target_state write and future
* TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
* __klp_sched_try_switch() don't set a task->patch_state to
- * KLP_UNDEFINED.
+ * KLP_TRANSITION_IDLE.
*/
smp_wmb();
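
That smp_wmb() needs a pairing read barrier on the task-switch side; a hedged sketch of the reader ordering, patterned on __klp_sched_try_switch() earlier in this diff:

	/* Reader: test TIF_PATCH_PENDING first, then act on the
	 * transition state; smp_rmb() orders the two reads against
	 * the smp_wmb() in klp_init_transition(). */
	if (klp_patch_pending(current)) {
		smp_rmb();
		klp_try_switch_task(current);
	}
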
@@ -652,7 +631,7 @@ void klp_reverse_transition(void)
pr_debug("'%s': reversing transition from %s\n",
klp_transition_patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching to unpatching" :
+ klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" :
"unpatching to patching");
/*
@@ -741,7 +720,7 @@ void klp_force_transition(void)
klp_update_patch_state(idle_task(cpu));
/* Set forced flag for patches being removed. */
- if (klp_target_state == KLP_UNPATCHED)
+ if (klp_target_state == KLP_TRANSITION_UNPATCHED)
klp_transition_patch->forced = true;
else if (klp_transition_patch->replace) {
klp_for_each_patch(patch) {