author		Petr Mladek <pmladek@suse.com>	2019-01-09 13:43:19 +0100
committer	Jiri Kosina <jkosina@suse.cz>	2019-01-11 20:51:23 +0100
commit		19514910d021c93c7823ec32067e6b7dea224f0f (patch)
tree		3df053de0eb62fec2557e24bd807cae328d4f21a /kernel/livepatch
parent		1686cc1a31f45a3fd090e5d0c6fce777422e13fa (diff)
livepatch: Change unsigned long old_addr -> void *old_func in struct klp_func
The address of the function to be patched and of the new function are stored in struct klp_func as:

	void *new_func;
	unsigned long old_addr;

The different naming scheme and types are derived from the way the addresses are set. @old_addr is assigned at runtime using a kallsyms-based search. @new_func is statically initialized, for example:

	static struct klp_func funcs[] = {
		{
			.old_name = "cmdline_proc_show",
			.new_func = livepatch_cmdline_proc_show,
		}, { }
	};

This patch changes unsigned long old_addr -> void *old_func. It removes some confusion when these addresses are later used in the code. It is motivated by a followup patch that adds special NOP struct klp_func entries, where we want to assign func->new_func = func->old_addr, which with this change becomes func->new_func = func->old_func.

This patch does not modify the existing behavior.

Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Alice Ferrazzi <alice.ferrazzi@gmail.com>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
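As a rough illustration of the type change (a sketch only, not the real definition: struct klp_func lives in include/linux/livepatch.h, which is outside the kernel/livepatch diff below, and the helper name klp_init_nop_func is hypothetical):

	struct klp_func {
		const char	*old_name;	/* symbol name of the function to patch */
		void		*new_func;	/* replacement, statically initialized  */
		unsigned long	old_sympos;	/* disambiguates duplicate symbol names */
		void		*old_func;	/* was: unsigned long old_addr;
						 * resolved via kallsyms at runtime     */
		unsigned long	old_size;	/* size of the original function        */
		/* further members omitted */
	};

	/*
	 * With matching pointer types, the NOP funcs from the follow-up
	 * patch can be wired up without an integer/pointer cast.
	 */
	static void klp_init_nop_func(struct klp_func *func)
	{
		func->new_func = func->old_func;
	}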
Diffstat (limited to 'kernel/livepatch')
-rw-r--r--	kernel/livepatch/core.c		 6
-rw-r--r--	kernel/livepatch/patch.c	18
-rw-r--r--	kernel/livepatch/patch.h	 4
-rw-r--r--	kernel/livepatch/transition.c	 4
4 files changed, 17 insertions, 15 deletions
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 5b77a7314e01..cb59c7fb94cb 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -648,7 +648,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
obj->mod = NULL;
klp_for_each_func(obj, func)
- func->old_addr = 0;
+ func->old_func = NULL;
}
/*
@@ -721,11 +721,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
- &func->old_addr);
+ (unsigned long *)&func->old_func);
if (ret)
return ret;
- ret = kallsyms_lookup_size_offset(func->old_addr,
+ ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
&func->old_size, NULL);
if (!ret) {
pr_err("kallsyms size lookup failed for '%s'\n",
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 7702cb4064fc..825022d70912 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -34,7 +34,7 @@
static LIST_HEAD(klp_ops);
-struct klp_ops *klp_find_ops(unsigned long old_addr)
+struct klp_ops *klp_find_ops(void *old_func)
{
struct klp_ops *ops;
struct klp_func *func;
@@ -42,7 +42,7 @@ struct klp_ops *klp_find_ops(unsigned long old_addr)
list_for_each_entry(ops, &klp_ops, node) {
func = list_first_entry(&ops->func_stack, struct klp_func,
stack_node);
- if (func->old_addr == old_addr)
+ if (func->old_func == old_func)
return ops;
}
@@ -142,17 +142,18 @@ static void klp_unpatch_func(struct klp_func *func)
if (WARN_ON(!func->patched))
return;
- if (WARN_ON(!func->old_addr))
+ if (WARN_ON(!func->old_func))
return;
- ops = klp_find_ops(func->old_addr);
+ ops = klp_find_ops(func->old_func);
if (WARN_ON(!ops))
return;
if (list_is_singular(&ops->func_stack)) {
unsigned long ftrace_loc;
- ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ ftrace_loc =
+ klp_get_ftrace_location((unsigned long)func->old_func);
if (WARN_ON(!ftrace_loc))
return;
@@ -174,17 +175,18 @@ static int klp_patch_func(struct klp_func *func)
struct klp_ops *ops;
int ret;
- if (WARN_ON(!func->old_addr))
+ if (WARN_ON(!func->old_func))
return -EINVAL;
if (WARN_ON(func->patched))
return -EINVAL;
- ops = klp_find_ops(func->old_addr);
+ ops = klp_find_ops(func->old_func);
if (!ops) {
unsigned long ftrace_loc;
- ftrace_loc = klp_get_ftrace_location(func->old_addr);
+ ftrace_loc =
+ klp_get_ftrace_location((unsigned long)func->old_func);
if (!ftrace_loc) {
pr_err("failed to find location for function '%s'\n",
func->old_name);
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
index e72d8250d04b..a9b16e513656 100644
--- a/kernel/livepatch/patch.h
+++ b/kernel/livepatch/patch.h
@@ -10,7 +10,7 @@
* struct klp_ops - structure for tracking registered ftrace ops structs
*
* A single ftrace_ops is shared between all enabled replacement functions
- * (klp_func structs) which have the same old_addr. This allows the switch
+ * (klp_func structs) which have the same old_func. This allows the switch
* between function versions to happen instantaneously by updating the klp_ops
* struct's func_stack list. The winner is the klp_func at the top of the
* func_stack (front of the list).
@@ -25,7 +25,7 @@ struct klp_ops {
struct ftrace_ops fops;
};
-struct klp_ops *klp_find_ops(unsigned long old_addr);
+struct klp_ops *klp_find_ops(void *old_func);
int klp_patch_object(struct klp_object *obj);
void klp_unpatch_object(struct klp_object *obj);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 304d5eb8a98c..f27a378ad5e1 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -224,11 +224,11 @@ static int klp_check_stack_func(struct klp_func *func,
* Check for the to-be-patched function
* (the previous func).
*/
- ops = klp_find_ops(func->old_addr);
+ ops = klp_find_ops(func->old_func);
if (list_is_singular(&ops->func_stack)) {
/* original function */
- func_addr = func->old_addr;
+ func_addr = (unsigned long)func->old_func;
func_size = func->old_size;
} else {
/* previously patched function */
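To make the func_stack mechanism described in the patch.h comment above concrete, here is a minimal sketch. The struct layout is taken from this diff; klp_active_func is a hypothetical helper name, and the real code simply open-codes list_first_entry() as seen in klp_find_ops():

	#include <linux/list.h>
	#include <linux/ftrace.h>
	#include <linux/livepatch.h>	/* struct klp_func, including its stack_node */

	struct klp_ops {
		struct list_head node;		/* entry in the global klp_ops list      */
		struct list_head func_stack;	/* klp_func structs sharing one old_func */
		struct ftrace_ops fops;
	};

	/*
	 * The "winner" is the klp_func at the front of func_stack; after this
	 * patch the stack is keyed on the void *old_func pointer rather than
	 * an unsigned long address.
	 */
	static struct klp_func *klp_active_func(struct klp_ops *ops)
	{
		return list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
	}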