Diffstat (limited to 'kernel/livepatch')
-rw-r--r--   kernel/livepatch/core.c        | 208
-rw-r--r--   kernel/livepatch/patch.c       |  37
-rw-r--r--   kernel/livepatch/shadow.c      |   6
-rw-r--r--   kernel/livepatch/transition.c  | 334
4 files changed, 380 insertions, 205 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index f76fdb925532..0cd39954d5a1 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -19,6 +19,7 @@ #include <linux/moduleloader.h> #include <linux/completion.h> #include <linux/memory.h> +#include <linux/rcupdate.h> #include <asm/cacheflush.h> #include "core.h" #include "patch.h" @@ -32,6 +33,7 @@ * * - klp_ftrace_handler() * - klp_update_patch_state() + * - __klp_sched_try_switch() */ DEFINE_MUTEX(klp_mutex); @@ -57,7 +59,7 @@ static void klp_find_object_module(struct klp_object *obj) if (!klp_is_module(obj)) return; - mutex_lock(&module_mutex); + rcu_read_lock_sched(); /* * We do not want to block removal of patched modules and therefore * we do not take a reference here. The patches are removed by @@ -74,7 +76,7 @@ static void klp_find_object_module(struct klp_object *obj) if (mod && mod->klp_alive) obj->mod = mod; - mutex_unlock(&module_mutex); + rcu_read_unlock_sched(); } static bool klp_initialized(void) @@ -117,27 +119,16 @@ static struct klp_object *klp_find_object(struct klp_patch *patch, } struct klp_find_arg { - const char *objname; const char *name; unsigned long addr; unsigned long count; unsigned long pos; }; -static int klp_find_callback(void *data, const char *name, - struct module *mod, unsigned long addr) +static int klp_match_callback(void *data, unsigned long addr) { struct klp_find_arg *args = data; - if ((mod && !args->objname) || (!mod && args->objname)) - return 0; - - if (strcmp(args->name, name)) - return 0; - - if (args->objname && strcmp(args->objname, mod->name)) - return 0; - args->addr = addr; args->count++; @@ -152,23 +143,30 @@ static int klp_find_callback(void *data, const char *name, return 0; } +static int klp_find_callback(void *data, const char *name, unsigned long addr) +{ + struct klp_find_arg *args = data; + + if (strcmp(args->name, name)) + return 0; + + return klp_match_callback(data, addr); +} + static int klp_find_object_symbol(const char *objname, const char *name, unsigned long sympos, unsigned long *addr) { struct klp_find_arg args = { - .objname = objname, .name = name, .addr = 0, .count = 0, .pos = sympos, }; - mutex_lock(&module_mutex); if (objname) - module_kallsyms_on_each_symbol(klp_find_callback, &args); + module_kallsyms_on_each_symbol(objname, klp_find_callback, &args); else - kallsyms_on_each_symbol(klp_find_callback, &args); - mutex_unlock(&module_mutex); + kallsyms_on_each_match_symbol(klp_match_callback, name, &args); /* * Ensure an address was found. If sympos is 0, ensure symbol is unique; @@ -191,7 +189,7 @@ static int klp_find_object_symbol(const char *objname, const char *name, return -EINVAL; } -static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, +static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, unsigned int symndx, Elf_Shdr *relasec, const char *sec_objname) { @@ -214,12 +212,12 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, * we use the smallest/strictest upper bound possible (56, based on * the current definition of MODULE_NAME_LEN) to prevent overflows. 
*/ - BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128); + BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512); relas = (Elf_Rela *) relasec->sh_addr; /* For each rela in this klp relocation section */ for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) { - sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); + sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); if (sym->st_shndx != SHN_LIVEPATCH) { pr_err("symbol %s is not marked as a livepatch symbol\n", strtab + sym->st_name); @@ -228,7 +226,7 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, /* Format: .klp.sym.sym_objname.sym_name,sympos */ cnt = sscanf(strtab + sym->st_name, - ".klp.sym.%55[^.].%127[^,],%lu", + ".klp.sym.%55[^.].%511[^,],%lu", sym_objname, sym_name, &sympos); if (cnt != 3) { pr_err("symbol %s has an incorrectly formatted name\n", @@ -245,7 +243,7 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, * symbols are exported and normal relas can be used instead. */ if (!sec_vmlinux && sym_vmlinux) { - pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section", + pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n", sym_name); return -EINVAL; } @@ -262,6 +260,14 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, return 0; } +void __weak clear_relocate_add(Elf_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ +} + /* * At a high-level, there are two types of klp relocation sections: those which * reference symbols which live in vmlinux; and those which reference symbols @@ -285,10 +291,10 @@ static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab, * the to-be-patched module to be loaded and patched sometime *after* the * klp module is loaded. */ -int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, - const char *shstrtab, const char *strtab, - unsigned int symndx, unsigned int secndx, - const char *objname) +static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, + const char *shstrtab, const char *strtab, + unsigned int symndx, unsigned int secndx, + const char *objname, bool apply) { int cnt, ret; char sec_objname[MODULE_NAME_LEN]; @@ -310,11 +316,26 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, if (strcmp(objname ? 
objname : "vmlinux", sec_objname)) return 0; - ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname); - if (ret) - return ret; + if (apply) { + ret = klp_resolve_symbols(sechdrs, strtab, symndx, + sec, sec_objname); + if (ret) + return ret; + + return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod); + } + + clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod); + return 0; +} - return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod); +int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, + const char *shstrtab, const char *strtab, + unsigned int symndx, unsigned int secndx, + const char *objname) +{ + return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx, + secndx, objname, true); } /* @@ -325,7 +346,10 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, * /sys/kernel/livepatch/<patch>/enabled * /sys/kernel/livepatch/<patch>/transition * /sys/kernel/livepatch/<patch>/force + * /sys/kernel/livepatch/<patch>/replace + * /sys/kernel/livepatch/<patch>/stack_order * /sys/kernel/livepatch/<patch>/<object> + * /sys/kernel/livepatch/<patch>/<object>/patched * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> */ static int __klp_disable_patch(struct klp_patch *patch); @@ -379,7 +403,7 @@ static ssize_t enabled_show(struct kobject *kobj, struct klp_patch *patch; patch = container_of(kobj, struct klp_patch, kobj); - return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled); + return sysfs_emit(buf, "%d\n", patch->enabled); } static ssize_t transition_show(struct kobject *kobj, @@ -388,8 +412,7 @@ static ssize_t transition_show(struct kobject *kobj, struct klp_patch *patch; patch = container_of(kobj, struct klp_patch, kobj); - return snprintf(buf, PAGE_SIZE-1, "%d\n", - patch == klp_transition_patch); + return sysfs_emit(buf, "%d\n", patch == klp_transition_patch); } static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, @@ -421,17 +444,67 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, return count; } +static ssize_t replace_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct klp_patch *patch; + + patch = container_of(kobj, struct klp_patch, kobj); + return sysfs_emit(buf, "%d\n", patch->replace); +} + +static ssize_t stack_order_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct klp_patch *patch, *this_patch; + int stack_order = 0; + + this_patch = container_of(kobj, struct klp_patch, kobj); + + mutex_lock(&klp_mutex); + + klp_for_each_patch(patch) { + stack_order++; + if (patch == this_patch) + break; + } + + mutex_unlock(&klp_mutex); + + return sysfs_emit(buf, "%d\n", stack_order); +} + static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); +static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace); +static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order); static struct attribute *klp_patch_attrs[] = { &enabled_kobj_attr.attr, &transition_kobj_attr.attr, &force_kobj_attr.attr, + &replace_kobj_attr.attr, + &stack_order_kobj_attr.attr, NULL }; ATTRIBUTE_GROUPS(klp_patch); +static ssize_t patched_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct klp_object *obj; + + obj = container_of(kobj, struct klp_object, kobj); + return sysfs_emit(buf, "%d\n", obj->patched); +} + +static 
struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched); +static struct attribute *klp_object_attrs[] = { + &patched_kobj_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(klp_object); + static void klp_free_object_dynamic(struct klp_object *obj) { kfree(obj->name); @@ -558,7 +631,7 @@ static void klp_kobj_release_patch(struct kobject *kobj) complete(&patch->finish); } -static struct kobj_type klp_ktype_patch = { +static const struct kobj_type klp_ktype_patch = { .release = klp_kobj_release_patch, .sysfs_ops = &kobj_sysfs_ops, .default_groups = klp_patch_groups, @@ -574,9 +647,10 @@ static void klp_kobj_release_object(struct kobject *kobj) klp_free_object_dynamic(obj); } -static struct kobj_type klp_ktype_object = { +static const struct kobj_type klp_ktype_object = { .release = klp_kobj_release_object, .sysfs_ops = &kobj_sysfs_ops, + .default_groups = klp_object_groups, }; static void klp_kobj_release_func(struct kobject *kobj) @@ -589,7 +663,7 @@ static void klp_kobj_release_func(struct kobject *kobj) klp_free_func_nop(func); } -static struct kobj_type klp_ktype_func = { +static const struct kobj_type klp_ktype_func = { .release = klp_kobj_release_func, .sysfs_ops = &kobj_sysfs_ops, }; @@ -745,8 +819,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) func->old_sympos ? func->old_sympos : 1); } -static int klp_apply_object_relocs(struct klp_patch *patch, - struct klp_object *obj) +static int klp_write_object_relocs(struct klp_patch *patch, + struct klp_object *obj, + bool apply) { int i, ret; struct klp_modinfo *info = patch->mod->klp_info; @@ -757,10 +832,10 @@ static int klp_apply_object_relocs(struct klp_patch *patch, if (!(sec->sh_flags & SHF_RELA_LIVEPATCH)) continue; - ret = klp_apply_section_relocs(patch->mod, info->sechdrs, + ret = klp_write_section_relocs(patch->mod, info->sechdrs, info->secstrings, patch->mod->core_kallsyms.strtab, - info->symndx, i, obj->name); + info->symndx, i, obj->name, apply); if (ret) return ret; } @@ -768,6 +843,18 @@ static int klp_apply_object_relocs(struct klp_patch *patch, return 0; } +static int klp_apply_object_relocs(struct klp_patch *patch, + struct klp_object *obj) +{ + return klp_write_object_relocs(patch, obj, true); +} + +static void klp_clear_object_relocs(struct klp_patch *patch, + struct klp_object *obj) +{ + klp_write_object_relocs(patch, obj, false); +} + /* parts of the initialization that is done only when the object is loaded */ static int klp_init_object_loaded(struct klp_patch *patch, struct klp_object *obj) @@ -863,14 +950,11 @@ static void klp_init_object_early(struct klp_patch *patch, list_add_tail(&obj->node, &patch->obj_list); } -static int klp_init_patch_early(struct klp_patch *patch) +static void klp_init_patch_early(struct klp_patch *patch) { struct klp_object *obj; struct klp_func *func; - if (!patch->objs) - return -EINVAL; - INIT_LIST_HEAD(&patch->list); INIT_LIST_HEAD(&patch->obj_list); kobject_init(&patch->kobj, &klp_ktype_patch); @@ -880,20 +964,12 @@ static int klp_init_patch_early(struct klp_patch *patch) init_completion(&patch->finish); klp_for_each_object_static(patch, obj) { - if (!obj->funcs) - return -EINVAL; - klp_init_object_early(patch, obj); klp_for_each_func_static(obj, func) { klp_init_func_early(obj, func); } } - - if (!try_module_get(patch->mod)) - return -ENODEV; - - return 0; } static int klp_init_patch(struct klp_patch *patch) @@ -932,7 +1008,7 @@ static int __klp_disable_patch(struct klp_patch *patch) if (klp_transition_patch) return -EBUSY; - klp_init_transition(patch, 
KLP_UNPATCHED); + klp_init_transition(patch, KLP_TRANSITION_UNPATCHED); klp_for_each_object(patch, obj) if (obj->patched) @@ -967,7 +1043,7 @@ static int __klp_enable_patch(struct klp_patch *patch) pr_notice("enabling patch '%s'\n", patch->mod->name); - klp_init_transition(patch, KLP_PATCHED); + klp_init_transition(patch, KLP_TRANSITION_PATCHED); /* * Enforce the order of the func->transition writes in @@ -1025,10 +1101,17 @@ err: int klp_enable_patch(struct klp_patch *patch) { int ret; + struct klp_object *obj; - if (!patch || !patch->mod) + if (!patch || !patch->mod || !patch->objs) return -EINVAL; + klp_for_each_object_static(patch, obj) { + if (!obj->funcs) + return -EINVAL; + } + + if (!is_livepatch_module(patch->mod)) { pr_err("module %s is not marked as a livepatch module\n", patch->mod->name); @@ -1052,12 +1135,13 @@ int klp_enable_patch(struct klp_patch *patch) return -EINVAL; } - ret = klp_init_patch_early(patch); - if (ret) { + if (!try_module_get(patch->mod)) { mutex_unlock(&klp_mutex); - return ret; + return -ENODEV; } + klp_init_patch_early(patch); + ret = klp_init_patch(patch); if (ret) goto err; @@ -1158,7 +1242,7 @@ static void klp_cleanup_module_patches_limited(struct module *mod, klp_unpatch_object(obj); klp_post_unpatch_callback(obj); - + klp_clear_object_relocs(patch, obj); klp_free_object_loaded(obj); break; } @@ -1175,7 +1259,7 @@ int klp_module_coming(struct module *mod) return -EINVAL; if (!strcmp(mod->name, "vmlinux")) { - pr_err("vmlinux.ko: invalid module name"); + pr_err("vmlinux.ko: invalid module name\n"); return -EINVAL; } diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index e8029aea67f1..90408500e5a3 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -49,14 +49,15 @@ static void notrace klp_ftrace_handler(unsigned long ip, ops = container_of(fops, struct klp_ops, fops); + /* + * The ftrace_test_recursion_trylock() will disable preemption, + * which is required for the variant of synchronize_rcu() that is + * used to allow patching functions where RCU is not watching. + * See klp_synchronize_transition() for more details. + */ bit = ftrace_test_recursion_trylock(ip, parent_ip); if (WARN_ON_ONCE(bit < 0)) return; - /* - * A variant of synchronize_rcu() is used to allow patching functions - * where RCU is not watching, see klp_synchronize_transition(). - */ - preempt_disable_notrace(); func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node); @@ -94,9 +95,9 @@ static void notrace klp_ftrace_handler(unsigned long ip, patch_state = current->patch_state; - WARN_ON_ONCE(patch_state == KLP_UNDEFINED); + WARN_ON_ONCE(patch_state == KLP_TRANSITION_IDLE); - if (patch_state == KLP_UNPATCHED) { + if (patch_state == KLP_TRANSITION_UNPATCHED) { /* * Use the previously patched version of the function. * If no previous patches exist, continue with the @@ -117,26 +118,12 @@ static void notrace klp_ftrace_handler(unsigned long ip, if (func->nop) goto unlock; - klp_arch_set_pc(fregs, (unsigned long)func->new_func); + ftrace_regs_set_instruction_pointer(fregs, (unsigned long)func->new_func); unlock: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } -/* - * Convert a function address into the appropriate ftrace location. - * - * Usually this is just the address of the function, but on some architectures - * it's more complicated so allow them to provide a custom behaviour. 
- */ -#ifndef klp_get_ftrace_location -static unsigned long klp_get_ftrace_location(unsigned long faddr) -{ - return faddr; -} -#endif - static void klp_unpatch_func(struct klp_func *func) { struct klp_ops *ops; @@ -153,8 +140,7 @@ static void klp_unpatch_func(struct klp_func *func) if (list_is_singular(&ops->func_stack)) { unsigned long ftrace_loc; - ftrace_loc = - klp_get_ftrace_location((unsigned long)func->old_func); + ftrace_loc = ftrace_location((unsigned long)func->old_func); if (WARN_ON(!ftrace_loc)) return; @@ -186,8 +172,7 @@ static int klp_patch_func(struct klp_func *func) if (!ops) { unsigned long ftrace_loc; - ftrace_loc = - klp_get_ftrace_location((unsigned long)func->old_func); + ftrace_loc = ftrace_location((unsigned long)func->old_func); if (!ftrace_loc) { pr_err("failed to find location for function '%s'\n", func->old_name); diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c index e5c9fb295ba9..c2e724d97ddf 100644 --- a/kernel/livepatch/shadow.c +++ b/kernel/livepatch/shadow.c @@ -272,12 +272,12 @@ void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) EXPORT_SYMBOL_GPL(klp_shadow_free); /** - * klp_shadow_free_all() - detach and free all <*, id> shadow variables + * klp_shadow_free_all() - detach and free all <_, id> shadow variables * @id: data identifier * @dtor: custom callback that can be used to unregister the variable * and/or free data that the shadow variable points to (optional) * - * This function releases the memory for all <*, id> shadow variable + * This function releases the memory for all <_, id> shadow variable * instances, callers should stop referencing them accordingly. */ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) @@ -288,7 +288,7 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) spin_lock_irqsave(&klp_shadow_lock, flags); - /* Delete all <*, id> from hash */ + /* Delete all <_, id> from hash */ hash_for_each(klp_shadow_hash, i, shadow, node) { if (klp_shadow_match(shadow, shadow->obj, id)) klp_shadow_free_struct(shadow, dtor); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index f6310f848f34..ba069459c101 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -9,23 +9,44 @@ #include <linux/cpu.h> #include <linux/stacktrace.h> +#include <linux/static_call.h> #include "core.h" #include "patch.h" #include "transition.h" -#include "../sched/sched.h" #define MAX_STACK_ENTRIES 100 +static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries); + #define STACK_ERR_BUF_SIZE 128 #define SIGNALS_TIMEOUT 15 struct klp_patch *klp_transition_patch; -static int klp_target_state = KLP_UNDEFINED; +static int klp_target_state = KLP_TRANSITION_IDLE; static unsigned int klp_signals_cnt; /* + * When a livepatch is in progress, enable klp stack checking in + * cond_resched(). This helps CPU-bound kthreads get patched. 
+ */ +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) + +#define klp_cond_resched_enable() sched_dynamic_klp_enable() +#define klp_cond_resched_disable() sched_dynamic_klp_disable() + +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ + +DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key); +EXPORT_SYMBOL(klp_sched_try_switch_key); + +#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key) +#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key) + +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ + +/* * This work can be performed periodically to finish patching or unpatching any * "straggler" tasks which failed to transition in the first attempt. */ @@ -75,16 +96,16 @@ static void klp_complete_transition(void) pr_debug("'%s': completing %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); - if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) { + if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) { klp_unpatch_replaced_patches(klp_transition_patch); klp_discard_nops(klp_transition_patch); } - if (klp_target_state == KLP_UNPATCHED) { + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { /* - * All tasks have transitioned to KLP_UNPATCHED so we can now + * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now * remove the new functions from the func_stack. */ klp_unpatch_objects(klp_transition_patch); @@ -102,36 +123,36 @@ static void klp_complete_transition(void) klp_for_each_func(obj, func) func->transition = false; - /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ - if (klp_target_state == KLP_PATCHED) + /* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */ + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_synchronize_transition(); read_lock(&tasklist_lock); for_each_process_thread(g, task) { WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } read_unlock(&tasklist_lock); for_each_possible_cpu(cpu) { task = idle_task(cpu); WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } klp_for_each_object(klp_transition_patch, obj) { if (!klp_is_object_loaded(obj)) continue; - if (klp_target_state == KLP_PATCHED) + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_post_patch_callback(obj); - else if (klp_target_state == KLP_UNPATCHED) + else if (klp_target_state == KLP_TRANSITION_UNPATCHED) klp_post_unpatch_callback(obj); } pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? 
"patching" : "unpatching"); - klp_target_state = KLP_UNDEFINED; + klp_target_state = KLP_TRANSITION_IDLE; klp_transition_patch = NULL; } @@ -143,13 +164,13 @@ static void klp_complete_transition(void) */ void klp_cancel_transition(void) { - if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED)) + if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED)) return; pr_debug("'%s': canceling patching transition, going to unpatch\n", klp_transition_patch->mod->name); - klp_target_state = KLP_UNPATCHED; + klp_target_state = KLP_TRANSITION_UNPATCHED; klp_complete_transition(); } @@ -173,8 +194,8 @@ void klp_update_patch_state(struct task_struct *task) * barrier (smp_rmb) for two cases: * * 1) Enforce the order of the TIF_PATCH_PENDING read and the - * klp_target_state read. The corresponding write barrier is in - * klp_init_transition(). + * klp_target_state read. The corresponding write barriers are in + * klp_init_transition() and klp_reverse_transition(). * * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read * of func->transition, if klp_ftrace_handler() is called later on @@ -197,36 +218,36 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, struct klp_ops *ops; int i; - for (i = 0; i < nr_entries; i++) { - address = entries[i]; + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { + /* + * Check for the to-be-unpatched function + * (the func itself). + */ + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + } else { + /* + * Check for the to-be-patched function + * (the previous func). + */ + ops = klp_find_ops(func->old_func); - if (klp_target_state == KLP_UNPATCHED) { - /* - * Check for the to-be-unpatched function - * (the func itself). - */ - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; + if (list_is_singular(&ops->func_stack)) { + /* original function */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; } else { - /* - * Check for the to-be-patched function - * (the previous func). - */ - ops = klp_find_ops(func->old_func); - - if (list_is_singular(&ops->func_stack)) { - /* original function */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* previously patched function */ - struct klp_func *prev; - - prev = list_next_entry(func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } + /* previously patched function */ + struct klp_func *prev; + + prev = list_next_entry(func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; } + } + + for (i = 0; i < nr_entries; i++) { + address = entries[i]; if (address >= func_addr && address < func_addr + func_size) return -EAGAIN; @@ -239,20 +260,19 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, * Determine whether it's safe to transition the task to the target patch state * by looking for any to-be-patched or to-be-unpatched functions on its stack. 
*/ -static int klp_check_stack(struct task_struct *task, char *err_buf) +static int klp_check_stack(struct task_struct *task, const char **oldname) { - static unsigned long entries[MAX_STACK_ENTRIES]; + unsigned long *entries = this_cpu_ptr(klp_stack_entries); struct klp_object *obj; struct klp_func *func; int ret, nr_entries; - ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries)); - if (ret < 0) { - snprintf(err_buf, STACK_ERR_BUF_SIZE, - "%s: %s:%d has an unreliable stack\n", - __func__, task->comm, task->pid); - return ret; - } + /* Protect 'klp_stack_entries' */ + lockdep_assert_preemption_disabled(); + + ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES); + if (ret < 0) + return -EINVAL; nr_entries = ret; klp_for_each_object(klp_transition_patch, obj) { @@ -261,11 +281,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf) klp_for_each_func(obj, func) { ret = klp_check_stack_func(func, entries, nr_entries); if (ret) { - snprintf(err_buf, STACK_ERR_BUF_SIZE, - "%s: %s:%d is sleeping on function %s\n", - __func__, task->comm, task->pid, - func->old_name); - return ret; + *oldname = func->old_name; + return -EADDRINUSE; } } } @@ -273,6 +290,22 @@ static int klp_check_stack(struct task_struct *task, char *err_buf) return 0; } +static int klp_check_and_switch_task(struct task_struct *task, void *arg) +{ + int ret; + + if (task_curr(task) && task != current) + return -EBUSY; + + ret = klp_check_stack(task, arg); + if (ret) + return ret; + + clear_tsk_thread_flag(task, TIF_PATCH_PENDING); + task->patch_state = klp_target_state; + return 0; +} + /* * Try to safely switch a task to the target patch state. If it's currently * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or @@ -280,13 +313,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf) */ static bool klp_try_switch_task(struct task_struct *task) { - static char err_buf[STACK_ERR_BUF_SIZE]; - struct rq *rq; - struct rq_flags flags; + const char *old_name; int ret; - bool success = false; - - err_buf[0] = '\0'; /* check if this task has already switched over */ if (task->patch_state == klp_target_state) @@ -304,37 +332,74 @@ static bool klp_try_switch_task(struct task_struct *task) * functions. If all goes well, switch the task to the target patch * state. 
*/ - rq = task_rq_lock(task, &flags); + if (task == current) + ret = klp_check_and_switch_task(current, &old_name); + else + ret = task_call_func(task, klp_check_and_switch_task, &old_name); + + switch (ret) { + case 0: /* success */ + break; - if (task_running(rq, task) && task != current) { - snprintf(err_buf, STACK_ERR_BUF_SIZE, - "%s: %s:%d is running\n", __func__, task->comm, - task->pid); - goto done; + case -EBUSY: /* klp_check_and_switch_task() */ + pr_debug("%s: %s:%d is running\n", + __func__, task->comm, task->pid); + break; + case -EINVAL: /* klp_check_and_switch_task() */ + pr_debug("%s: %s:%d has an unreliable stack\n", + __func__, task->comm, task->pid); + break; + case -EADDRINUSE: /* klp_check_and_switch_task() */ + pr_debug("%s: %s:%d is sleeping on function %s\n", + __func__, task->comm, task->pid, old_name); + break; + + default: + pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n", + __func__, ret, task->comm, task->pid); + break; } - ret = klp_check_stack(task, err_buf); - if (ret) - goto done; + return !ret; +} - success = true; +void __klp_sched_try_switch(void) +{ + if (likely(!klp_patch_pending(current))) + return; - clear_tsk_thread_flag(task, TIF_PATCH_PENDING); - task->patch_state = klp_target_state; + /* + * This function is called from cond_resched() which is called in many + * places throughout the kernel. Using the klp_mutex here might + * deadlock. + * + * Instead, disable preemption to prevent racing with other callers of + * klp_try_switch_task(). Thanks to task_call_func() they won't be + * able to switch this task while it's running. + */ + preempt_disable(); -done: - task_rq_unlock(rq, task, &flags); + /* + * Make sure current didn't get patched between the above check and + * preempt_disable(). + */ + if (unlikely(!klp_patch_pending(current))) + goto out; /* - * Due to console deadlock issues, pr_debug() can't be used while - * holding the task rq lock. Instead we have to use a temporary buffer - * and print the debug message after releasing the lock. + * Enforce the order of the TIF_PATCH_PENDING read above and the + * klp_target_state read in klp_try_switch_task(). The corresponding + * write barriers are in klp_init_transition() and + * klp_reverse_transition(). */ - if (err_buf[0] != '\0') - pr_debug("%s", err_buf); + smp_rmb(); - return success; + klp_try_switch_task(current); + +out: + preempt_enable(); } +EXPORT_SYMBOL(__klp_sched_try_switch); /* * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set. @@ -369,9 +434,7 @@ static void klp_send_signals(void) * Send fake signal to all non-kthread tasks which are * still not migrated. */ - spin_lock_irq(&task->sighand->siglock); - signal_wake_up(task, 0); - spin_unlock_irq(&task->sighand->siglock); + set_notify_signal(task); } } read_unlock(&tasklist_lock); @@ -392,7 +455,7 @@ void klp_try_complete_transition(void) struct klp_patch *patch; bool complete = true; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); /* * Try to switch the tasks to the target patch state by walking their @@ -412,19 +475,22 @@ void klp_try_complete_transition(void) /* * Ditto for the idle "swapper" tasks. */ - get_online_cpus(); + cpus_read_lock(); for_each_possible_cpu(cpu) { task = idle_task(cpu); if (cpu_online(cpu)) { - if (!klp_try_switch_task(task)) + if (!klp_try_switch_task(task)) { complete = false; + /* Make idle task go through the main loop. 
*/ + wake_up_if_idle(cpu); + } } else if (task->patch_state != klp_target_state) { /* offline idle tasks can be switched immediately */ clear_tsk_thread_flag(task, TIF_PATCH_PENDING); task->patch_state = klp_target_state; } } - put_online_cpus(); + cpus_read_unlock(); if (!complete) { if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT)) @@ -441,7 +507,8 @@ void klp_try_complete_transition(void) return; } - /* we're done, now cleanup the data structures */ + /* Done! Now cleanup the data structures. */ + klp_cond_resched_disable(); patch = klp_transition_patch; klp_complete_transition(); @@ -465,11 +532,11 @@ void klp_start_transition(void) struct task_struct *g, *task; unsigned int cpu; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); pr_notice("'%s': starting %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Mark all normal tasks as needing a patch state update. They'll @@ -493,6 +560,8 @@ void klp_start_transition(void) set_tsk_thread_flag(task, TIF_PATCH_PENDING); } + klp_cond_resched_enable(); + klp_signals_cnt = 0; } @@ -509,7 +578,7 @@ void klp_init_transition(struct klp_patch *patch, int state) struct klp_func *func; int initial_state = !state; - WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE); klp_transition_patch = patch; @@ -520,7 +589,7 @@ void klp_init_transition(struct klp_patch *patch, int state) klp_target_state = state; pr_debug("'%s': initializing %s transition\n", patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Initialize all tasks to the initial patch state to prepare them for @@ -528,7 +597,7 @@ void klp_init_transition(struct klp_patch *patch, int state) */ read_lock(&tasklist_lock); for_each_process_thread(g, task) { - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } read_unlock(&tasklist_lock); @@ -538,18 +607,19 @@ void klp_init_transition(struct klp_patch *patch, int state) */ for_each_possible_cpu(cpu) { task = idle_task(cpu); - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } /* * Enforce the order of the task->patch_state initializations and the * func->transition updates to ensure that klp_ftrace_handler() doesn't - * see a func in transition with a task->patch_state of KLP_UNDEFINED. + * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE. * * Also enforce the order of the klp_target_state write and future - * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't - * set a task->patch_state to KLP_UNDEFINED. + * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and + * __klp_sched_try_switch() don't set a task->patch_state to + * KLP_TRANSITION_IDLE. */ smp_wmb(); @@ -582,17 +652,13 @@ void klp_reverse_transition(void) pr_debug("'%s': reversing transition from %s\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching to unpatching" : + klp_target_state == KLP_TRANSITION_PATCHED ? 
"patching to unpatching" : "unpatching to patching"); - klp_transition_patch->enabled = !klp_transition_patch->enabled; - - klp_target_state = !klp_target_state; - /* * Clear all TIF_PATCH_PENDING flags to prevent races caused by - * klp_update_patch_state() running in parallel with - * klp_start_transition(). + * klp_update_patch_state() or __klp_sched_try_switch() running in + * parallel with the reverse transition. */ read_lock(&tasklist_lock); for_each_process_thread(g, task) @@ -602,18 +668,51 @@ void klp_reverse_transition(void) for_each_possible_cpu(cpu) clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); - /* Let any remaining calls to klp_update_patch_state() complete */ + /* + * Make sure all existing invocations of klp_update_patch_state() and + * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before + * starting the reverse transition. + */ klp_synchronize_transition(); + /* + * All patching has stopped, now re-initialize the global variables to + * prepare for the reverse transition. + */ + klp_transition_patch->enabled = !klp_transition_patch->enabled; + klp_target_state = !klp_target_state; + + /* + * Enforce the order of the klp_target_state write and the + * TIF_PATCH_PENDING writes in klp_start_transition() to ensure + * klp_update_patch_state() and __klp_sched_try_switch() don't set + * task->patch_state to the wrong value. + */ + smp_wmb(); + klp_start_transition(); } /* Called from copy_process() during fork */ void klp_copy_process(struct task_struct *child) { - child->patch_state = current->patch_state; - /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ + /* + * The parent process may have gone through a KLP transition since + * the thread flag was copied in setup_thread_stack earlier. Bring + * the task flag up to date with the parent here. + * + * The operation is serialized against all klp_*_transition() + * operations by the tasklist_lock. The only exceptions are + * klp_update_patch_state(current) and __klp_sched_try_switch(), but we + * cannot race with them because we are current. + */ + if (test_tsk_thread_flag(current, TIF_PATCH_PENDING)) + set_tsk_thread_flag(child, TIF_PATCH_PENDING); + else + clear_tsk_thread_flag(child, TIF_PATCH_PENDING); + + child->patch_state = current->patch_state; } /* @@ -641,6 +740,13 @@ void klp_force_transition(void) for_each_possible_cpu(cpu) klp_update_patch_state(idle_task(cpu)); - klp_for_each_patch(patch) - patch->forced = true; + /* Set forced flag for patches being removed. */ + if (klp_target_state == KLP_TRANSITION_UNPATCHED) + klp_transition_patch->forced = true; + else if (klp_transition_patch->replace) { + klp_for_each_patch(patch) { + if (patch != klp_transition_patch) + patch->forced = true; + } + } } |