From 926a59b1dfe2580e1a00bb2ba1664e5472077cbd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:35 +0930
Subject: module: Annotate module version magic

Due to the new lockdep checks in the coming patch, we go:

[ 9.759380] ------------[ cut here ]------------
[ 9.759389] WARNING: CPU: 31 PID: 597 at ../kernel/module.c:216 each_symbol_section+0x121/0x130()
[ 9.759391] Modules linked in:
[ 9.759393] CPU: 31 PID: 597 Comm: modprobe Not tainted 4.0.0-rc1+ #65
[ 9.759393] Hardware name: Intel Corporation S2600GZ/S2600GZ, BIOS SE5C600.86B.02.02.0002.122320131210 12/23/2013
[ 9.759396] ffffffff817d8676 ffff880424567ca8 ffffffff8157e98b 0000000000000001
[ 9.759398] 0000000000000000 ffff880424567ce8 ffffffff8105fbc7 ffff880424567cd8
[ 9.759400] 0000000000000000 ffffffff810ec160 ffff880424567d40 0000000000000000
[ 9.759400] Call Trace:
[ 9.759407] [] dump_stack+0x4f/0x7b
[ 9.759410] [] warn_slowpath_common+0x97/0xe0
[ 9.759412] [] ? section_objs+0x60/0x60
[ 9.759414] [] warn_slowpath_null+0x1a/0x20
[ 9.759415] [] each_symbol_section+0x121/0x130
[ 9.759417] [] find_symbol+0x31/0x70
[ 9.759420] [] load_module+0x20f/0x2660
[ 9.759422] [] ? __do_page_fault+0x190/0x4e0
[ 9.759426] [] ? retint_restore_args+0x13/0x13
[ 9.759427] [] ? retint_restore_args+0x13/0x13
[ 9.759433] [] ? trace_hardirqs_on_caller+0x11d/0x1e0
[ 9.759437] [] ? trace_hardirqs_on_thunk+0x3a/0x3f
[ 9.759439] [] ? retint_restore_args+0x13/0x13
[ 9.759441] [] SyS_init_module+0xce/0x100
[ 9.759443] [] system_call_fastpath+0x12/0x17
[ 9.759445] ---[ end trace 9294429076a9c644 ]---

As per the comment this site should be fine, but let's wrap it in
preempt_disable() anyhow to placate lockdep.

Cc: Rusty Russell
Acked-by: Paul E. McKenney
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 42a1d2afb217..1150d5239205 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1169,11 +1169,17 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
 	const unsigned long *crc;
 
-	/* Since this should be found in kernel (which can't be removed),
-	 * no locking is necessary. */
+	/*
+	 * Since this should be found in kernel (which can't be removed), no
+	 * locking is necessary -- use preempt_disable() to placate lockdep.
+	 */
+	preempt_disable();
 	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
-			 &crc, true, false))
+			 &crc, true, false)) {
+		preempt_enable();
 		BUG();
+	}
+	preempt_enable();
 
 	return check_version(sechdrs, versindex,
 			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
 			     NULL);
-- cgit

From bed831f9a251968272dae10a83b512c7db256ef0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:35 +0930
Subject: module, jump_label: Fix module locking

As per the module core lockdep annotations in the coming patch:

[ 18.034047] ---[ end trace 9294429076a9c673 ]---
[ 18.047760] Hardware name: Intel Corporation S2600GZ/S2600GZ, BIOS SE5C600.86B.02.02.0002.122320131210 12/23/2013
[ 18.059228] ffffffff817d8676 ffff880036683c38 ffffffff8157e98b 0000000000000001
[ 18.067541] 0000000000000000 ffff880036683c78 ffffffff8105fbc7 ffff880036683c68
[ 18.075851] ffffffffa0046b08 0000000000000000 ffffffffa0046d00 ffffffffa0046cc8
[ 18.084173] Call Trace:
[ 18.086906] [] dump_stack+0x4f/0x7b
[ 18.092649] [] warn_slowpath_common+0x97/0xe0
[ 18.099361] [] warn_slowpath_null+0x1a/0x20
[ 18.105880] [] __module_address+0x1d2/0x1e0
[ 18.112400] [] jump_label_module_notify+0x143/0x1e0
[ 18.119710] [] notifier_call_chain+0x4f/0x70
[ 18.126326] [] __blocking_notifier_call_chain+0x5e/0x90
[ 18.134009] [] blocking_notifier_call_chain+0x16/0x20
[ 18.141490] [] load_module+0x1b50/0x2660
[ 18.147720] [] SyS_init_module+0xce/0x100
[ 18.154045] [] system_call_fastpath+0x12/0x17
[ 18.160748] ---[ end trace 9294429076a9c674 ]---

Jump labels are not doing it right; fix this.

Cc: Rusty Russell
Cc: Jason Baron
Acked-by: Paul E. McKenney
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/jump_label.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 9019f15deab2..52ebaca1b9fc 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -302,7 +302,7 @@ static int jump_label_add_module(struct module *mod)
 			continue;
 
 		key = iterk;
-		if (__module_address(iter->key) == mod) {
+		if (within_module(iter->key, mod)) {
 			/*
 			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
 			 */
@@ -339,7 +339,7 @@ static void jump_label_del_module(struct module *mod)
 
 		key = (struct static_key *)(unsigned long)iter->key;
 
-		if (__module_address(iter->key) == mod)
+		if (within_module(iter->key, mod))
 			continue;
 
 		prev = &key->next;
@@ -443,14 +443,16 @@ static void jump_label_update(struct static_key *key, int enable)
 {
 	struct jump_entry *stop = __stop___jump_table;
 	struct jump_entry *entry = jump_label_get_entries(key);
-
 #ifdef CONFIG_MODULES
-	struct module *mod = __module_address((unsigned long)key);
+	struct module *mod;
 
 	__jump_label_mod_update(key, enable);
 
+	preempt_disable();
+	mod = __module_address((unsigned long)key);
 	if (mod)
 		stop = mod->jump_entries + mod->num_jump_entries;
+	preempt_enable();
 #endif
 	/* if there are no users, entry can be NULL */
 	if (entry)
-- cgit

From 0be964be0d45084245673c971d72a4b51690231d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:35 +0930
Subject: module: Sanitize RCU usage and locking

Currently the RCU usage in module is an inconsistent mess of RCU and
RCU-sched; this is broken for CONFIG_PREEMPT where synchronize_rcu()
does not imply synchronize_sched().

Most usage sites use preempt_{dis,en}able() which is RCU-sched, but
(most of) the modification sites use synchronize_rcu() -- with the
exception of the module bug list, which actually uses RCU.
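To make the flavour mismatch concrete, here is a minimal illustrative
sketch -- not code from this series, and find_module_in_list() is a
hypothetical stand-in for the real list walk:

	/* Reader side, as the module core does it: an RCU-sched section. */
	preempt_disable();
	mod = find_module_in_list(addr);
	preempt_enable();

	/* The updater must wait for the same flavour of reader: */
	list_del_rcu(&mod->list);
	synchronize_sched();	/* waits for preempt-disabled readers */
	kfree(mod);

Under CONFIG_PREEMPT, synchronize_rcu() only waits for rcu_read_lock()
sections, so pairing it with preempt_disable() readers can free a
module while a reader still holds a pointer into it.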
Convert everything over to RCU-sched.

Furthermore add lockdep asserts to all sites, because it's not at all
clear to me the required locking is observed, esp. on exported
functions.

Cc: Rusty Russell
Acked-by: "Paul E. McKenney"
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 40 ++++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 1150d5239205..a15899e00ca9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -105,6 +105,22 @@ static LIST_HEAD(modules);
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
 
+static void module_assert_mutex(void)
+{
+	lockdep_assert_held(&module_mutex);
+}
+
+static void module_assert_mutex_or_preempt(void)
+{
+#ifdef CONFIG_LOCKDEP
+	if (unlikely(!debug_locks))
+		return;
+
+	WARN_ON(!rcu_read_lock_sched_held() &&
+		!lockdep_is_held(&module_mutex));
+#endif
+}
+
 #ifdef CONFIG_MODULE_SIG
 #ifdef CONFIG_MODULE_SIG_FORCE
 static bool sig_enforce = true;
@@ -318,6 +334,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
 	};
 
+	module_assert_mutex_or_preempt();
+
 	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 		return true;
 
@@ -457,6 +475,8 @@ static struct module *find_module_all(const char *name, size_t len,
 {
 	struct module *mod;
 
+	module_assert_mutex();
+
 	list_for_each_entry(mod, &modules, list) {
 		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 			continue;
@@ -1860,8 +1880,8 @@ static void free_module(struct module *mod)
 	list_del_rcu(&mod->list);
 	/* Remove this module from bug list, this uses list_del_rcu */
 	module_bug_cleanup(mod);
-	/* Wait for RCU synchronizing before releasing mod->list and buglist. */
-	synchronize_rcu();
+	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+	synchronize_sched();
 	mutex_unlock(&module_mutex);
 
 	/* This may be NULL, but that's OK */
@@ -3133,11 +3153,11 @@ static noinline int do_init_module(struct module *mod)
 	mod->init_text_size = 0;
 	/*
 	 * We want to free module_init, but be aware that kallsyms may be
-	 * walking this with preempt disabled.  In all the failure paths,
-	 * we call synchronize_rcu/synchronize_sched, but we don't want
-	 * to slow down the success path, so use actual RCU here.
+	 * walking this with preempt disabled.  In all the failure paths, we
+	 * call synchronize_sched(), but we don't want to slow down the success
+	 * path, so use actual RCU here.
 	 */
-	call_rcu(&freeinit->rcu, do_free_init);
+	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
 	wake_up_all(&module_wq);
 
@@ -3395,8 +3415,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
 	wake_up_all(&module_wq);
-	/* Wait for RCU synchronizing before releasing mod->list. */
-	synchronize_rcu();
+	/* Wait for RCU-sched synchronizing before releasing mod->list. */
+	synchronize_sched();
 	mutex_unlock(&module_mutex);
  free_module:
 	/* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3663,6 +3683,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 	unsigned int i;
 	int ret;
 
+	module_assert_mutex();
+
 	list_for_each_entry(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
@@ -3837,6 +3859,8 @@ struct module *__module_address(unsigned long addr)
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
+	module_assert_mutex_or_preempt();
+
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-- cgit

From 6695b92a60bc7160c92d6dc5b17cc79673017c2f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:36 +0930
Subject: seqlock: Better document raw_write_seqcount_latch()

Improve the documentation of the latch technique as used in the
current timekeeping code, such that it can be readily employed
elsewhere.

Borrow from the comments in timekeeping and replace those with a
reference to this more generic comment.

Cc: Andrea Arcangeli
Cc: David Woodhouse
Cc: Rik van Riel
Cc: "Paul E. McKenney"
Cc: Oleg Nesterov
Reviewed-by: Mathieu Desnoyers
Acked-by: Michel Lespinasse
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/time/timekeeping.c | 27 +--------------------------
 1 file changed, 1 insertion(+), 26 deletions(-)

(limited to 'kernel')

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 946acb72179f..cbfedddbf0cb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -330,32 +330,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * So we handle this differently than the other timekeeping accessor
- * functions which retry when the sequence count has changed. The
- * update side does:
- *
- * smp_wmb();	<- Ensure that the last base[1] update is visible
- * tkf->seq++;
- * smp_wmb();	<- Ensure that the seqcount update is visible
- * update(tkf->base[0], tkr);
- * smp_wmb();	<- Ensure that the base[0] update is visible
- * tkf->seq++;
- * smp_wmb();	<- Ensure that the seqcount update is visible
- * update(tkf->base[1], tkr);
- *
- * The reader side does:
- *
- * do {
- *	seq = tkf->seq;
- *	smp_rmb();
- *	idx = seq & 0x01;
- *	now = now(tkf->base[idx]);
- *	smp_rmb();
- * } while (seq != tkf->seq)
- *
- * As long as we update base[0] readers are forced off to
- * base[1]. Once base[0] is updated readers are redirected to base[0]
- * and the base[1] update takes place.
+ * Employ the latch technique; see @raw_write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
  * which is still consistent. In the worst case this can result is a
-- cgit

From 7fc26327b75685f37f58d64bdb061460f834f80d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:36 +0930
Subject: seqlock: Introduce raw_read_seqcount_latch()

Because with latches there is a strict data dependency on the seq load
we can avoid the rmb in favour of a read_barrier_depends.
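As a sketch of the idea -- the authoritative definition belongs in
include/linux/seqlock.h, which this kernel/-limited view does not
show -- the new read primitive can look like:

	static inline int raw_read_seqcount_latch(seqcount_t *s)
	{
		/*
		 * The caller computes base[seq & 1] from the returned
		 * value, so the subsequent load is ordered by the data
		 * dependency itself; no smp_rmb() is needed.
		 */
		int seq = READ_ONCE(s->sequence);

		smp_read_barrier_depends();
		return seq;
	}

On all architectures except Alpha, smp_read_barrier_depends() compiles
away entirely, which is the win over raw_read_seqcount()'s smp_rmb().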
Suggested-by: Ingo Molnar
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/time/timekeeping.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cbfedddbf0cb..266dafe8f015 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount(&tkf->seq);
+		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));
-- cgit

From 93c2e105f6bcee231c951ba0e56e84505c4b0483 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:37 +0930
Subject: module: Optimize __module_address() using a latched RB-tree

Currently __module_address() is using a linear search through all
modules in order to find the module corresponding to the provided
address. With a lot of modules this can take a lot of time.

One of the users of this is kernel_text_address() which is employed
in many stack unwinders; which in turn are used by perf-callchain and
ftrace (possibly from NMI context).

So by optimizing __module_address() we optimize many stack unwinders
which are used by both perf and tracing in performance sensitive code.

Cc: Rusty Russell
Cc: Steven Rostedt
Cc: Mathieu Desnoyers
Cc: Oleg Nesterov
Cc: "Paul E. McKenney"
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 110 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index a15899e00ca9..e0db5c31cb53 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -101,6 +101,108 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
+
+/*
+ * Use a latched RB-tree for __module_address(); this allows us to use
+ * RCU-sched lookups of the address from any context.
+ *
+ * Because modules have two address ranges: init and core, we need two
+ * latch_tree_nodes entries. Therefore we need the back-pointer from
+ * mod_tree_node.
+ *
+ * Because init ranges are short lived we mark them unlikely and have placed
+ * them outside the critical cacheline in struct module.
+ */
+
+static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
+{
+	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+	struct module *mod = mtn->mod;
+
+	if (unlikely(mtn == &mod->mtn_init))
+		return (unsigned long)mod->module_init;
+
+	return (unsigned long)mod->module_core;
+}
+
+static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
+{
+	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+	struct module *mod = mtn->mod;
+
+	if (unlikely(mtn == &mod->mtn_init))
+		return (unsigned long)mod->init_size;
+
+	return (unsigned long)mod->core_size;
+}
+
+static __always_inline bool
+mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
+{
+	return __mod_tree_val(a) < __mod_tree_val(b);
+}
+
+static __always_inline int
+mod_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	unsigned long start, end;
+
+	start = __mod_tree_val(n);
+	if (val < start)
+		return -1;
+
+	end = start + __mod_tree_size(n);
+	if (val >= end)
+		return 1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops mod_tree_ops = {
+	.less = mod_tree_less,
+	.comp = mod_tree_comp,
+};
+
+static struct latch_tree_root mod_tree __cacheline_aligned;
+
+/*
+ * These modifications: insert, remove_init and remove; are serialized by the
+ * module_mutex.
+ */
+static void mod_tree_insert(struct module *mod)
+{
+	mod->mtn_core.mod = mod;
+	mod->mtn_init.mod = mod;
+
+	latch_tree_insert(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
+	if (mod->init_size)
+		latch_tree_insert(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
+}
+
+static void mod_tree_remove_init(struct module *mod)
+{
+	if (mod->init_size)
+		latch_tree_erase(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
+}
+
+static void mod_tree_remove(struct module *mod)
+{
+	latch_tree_erase(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
+	mod_tree_remove_init(mod);
+}
+
+static struct module *mod_tree_find(unsigned long addr)
+{
+	struct latch_tree_node *ltn;
+
+	ltn = latch_tree_find((void *)addr, &mod_tree, &mod_tree_ops);
+	if (!ltn)
+		return NULL;
+
+	return container_of(ltn, struct mod_tree_node, node)->mod;
+}
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
@@ -1878,6 +1980,7 @@ static void free_module(struct module *mod)
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+	mod_tree_remove(mod);
 	/* Remove this module from bug list, this uses list_del_rcu */
 	module_bug_cleanup(mod);
 	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
@@ -3145,6 +3248,7 @@ static noinline int do_init_module(struct module *mod)
 	mod->symtab = mod->core_symtab;
 	mod->strtab = mod->core_strtab;
 #endif
+	mod_tree_remove_init(mod);
 	unset_module_init_ro_nx(mod);
 	module_arch_freeing_init(mod);
 	mod->module_init = NULL;
@@ -3215,6 +3319,7 @@ again:
 		goto out;
 	}
 	list_add_rcu(&mod->list, &modules);
+	mod_tree_insert(mod);
 	err = 0;
 
 out:
@@ -3861,13 +3966,13 @@ struct module *__module_address(unsigned long addr)
 
 	module_assert_mutex_or_preempt();
 
-	list_for_each_entry_rcu(mod, &modules, list) {
+	mod = mod_tree_find(addr);
+	if (mod) {
+		BUG_ON(!within_module(addr, mod));
 		if (mod->state == MODULE_STATE_UNFORMED)
-			continue;
-		if (within_module(addr, mod))
-			return mod;
+			mod = NULL;
 	}
-	return NULL;
+	return mod;
 }
 EXPORT_SYMBOL_GPL(__module_address);
-- cgit

From 6c9692e2d6a2206d8fd75ea247daa47fb75e4a02 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:37 +0930
Subject: module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING

Andrew worried about the overhead on small systems; only use the fancy
code when either perf or tracing is enabled.

Cc: Rusty Russell
Cc: Steven Rostedt
Requested-by: Andrew Morton
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index e0db5c31cb53..ac3044ceca3f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -102,6 +102,8 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
 /*
  * Use a latched RB-tree for __module_address(); this allows us to use
  * RCU-sched lookups of the address from any context.
@@ -112,6 +114,10 @@ static LIST_HEAD(modules);
  *
  * Because init ranges are short lived we mark them unlikely and have placed
  * them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
  */
 
 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
@@ -192,7 +198,7 @@ static void mod_tree_remove(struct module *mod)
 	mod_tree_remove_init(mod);
 }
 
-static struct module *mod_tree_find(unsigned long addr)
+static struct module *mod_find(unsigned long addr)
 {
 	struct latch_tree_node *ltn;
 
@@ -203,6 +209,26 @@ static struct module *mod_tree_find(unsigned long addr)
 	return container_of(ltn, struct mod_tree_node, node)->mod;
 }
 
+#else /* MODULES_TREE_LOOKUP */
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
@@ -3966,7 +3992,7 @@ struct module *__module_address(unsigned long addr)
 
 	module_assert_mutex_or_preempt();
 
-	mod = mod_tree_find(addr);
+	mod = mod_find(addr);
 	if (mod) {
 		BUG_ON(!within_module(addr, mod));
 		if (mod->state == MODULE_STATE_UNFORMED)
-- cgit

From b7df4d1b23bfca830f1076412d21524686c5a441 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:37 +0930
Subject: module: Use __module_address() for module_address_lookup()

Use the generic __module_address() addr to struct module lookup
instead of open coding it once more.

Cc: Rusty Russell
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index ac3044ceca3f..293dfaf4ce52 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3671,19 +3671,15 @@ const char *module_address_lookup(unsigned long addr,
 			    char **modname,
 			    char *namebuf)
 {
-	struct module *mod;
 	const char *ret = NULL;
+	struct module *mod;
 
 	preempt_disable();
-	list_for_each_entry_rcu(mod, &modules, list) {
-		if (mod->state == MODULE_STATE_UNFORMED)
-			continue;
-		if (within_module(addr, mod)) {
-			if (modname)
-				*modname = mod->name;
-			ret = get_ksymbol(mod, addr, size, offset);
-			break;
-		}
+	mod = __module_address(addr);
+	if (mod) {
+		if (modname)
+			*modname = mod->name;
+		ret = get_ksymbol(mod, addr, size, offset);
 	}
 	/* Make a copy in here where it's safe */
 	if (ret) {
@@ -3691,6 +3687,7 @@ const char *module_address_lookup(unsigned long addr,
 		ret = namebuf;
 	}
 	preempt_enable();
+
 	return ret;
 }
-- cgit

From 4f666546d047752c17265f4641cc9470c1cbaed4 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:38 +0930
Subject: module: Rework module_addr_{min,max}

__module_address() does an initial bound check before doing the
{list/tree} iteration to find the actual module. The bound variables
are nowhere near the mod_tree cacheline, in fact they're nowhere near
one another.

module_addr_min lives in .data while module_addr_max lives in .bss
(smarty pants GCC thinks the explicit 0 assignment is a mistake).

Rectify this by moving the two variables into a structure together
with the latch_tree_root to guarantee they all share the same
cacheline and avoid hitting two extra cachelines for the lookup.

While reworking the bounds code, move the bound update from allocation
to insertion time; this avoids updating the bounds for a few error
paths.
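The resulting fast path is easiest to see in outline (a sketch of the
shape after this patch, not the verbatim kernel code):

	struct module *__module_address(unsigned long addr)
	{
		/*
		 * mod_tree.addr_min, mod_tree.addr_max and the latch tree
		 * root now sit on one cacheline, so the common case of a
		 * non-module address is rejected with a single cache access.
		 */
		if (addr < mod_tree.addr_min || addr > mod_tree.addr_max)
			return NULL;

		return mod_find(addr);
	}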
Cc: Rusty Russell
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 kernel/module.c | 80 +++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 52 insertions(+), 28 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 293dfaf4ce52..c8da2a59ebf7 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -170,7 +170,26 @@ static const struct latch_tree_ops mod_tree_ops = {
 	.comp = mod_tree_comp,
 };
 
-static struct latch_tree_root mod_tree __cacheline_aligned;
+static struct mod_tree_root {
+	struct latch_tree_root root;
+	unsigned long addr_min;
+	unsigned long addr_max;
+} mod_tree __cacheline_aligned = {
+	.addr_min = -1UL,
+};
+
+#define module_addr_min mod_tree.addr_min
+#define module_addr_max mod_tree.addr_max
+
+static noinline void __mod_tree_insert(struct mod_tree_node *node)
+{
+	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+static void __mod_tree_remove(struct mod_tree_node *node)
+{
+	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
+}
 
 /*
  * These modifications: insert, remove_init and remove; are serialized by the
@@ -181,20 +200,20 @@ static void mod_tree_insert(struct module *mod)
 	mod->mtn_core.mod = mod;
 	mod->mtn_init.mod = mod;
 
-	latch_tree_insert(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
+	__mod_tree_insert(&mod->mtn_core);
 	if (mod->init_size)
-		latch_tree_insert(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
+		__mod_tree_insert(&mod->mtn_init);
 }
 
 static void mod_tree_remove_init(struct module *mod)
 {
 	if (mod->init_size)
-		latch_tree_erase(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
+		__mod_tree_remove(&mod->mtn_init);
 }
 
 static void mod_tree_remove(struct module *mod)
 {
-	latch_tree_erase(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
+	__mod_tree_remove(&mod->mtn_core);
 	mod_tree_remove_init(mod);
 }
 
@@ -202,7 +221,7 @@ static struct module *mod_find(unsigned long addr)
 {
 	struct latch_tree_node *ltn;
 
-	ltn = latch_tree_find((void *)addr, &mod_tree, &mod_tree_ops);
+	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
 	if (!ltn)
 		return NULL;
 
@@ -211,6 +230,8 @@ static struct module *mod_find(unsigned long addr)
 
 #else /* MODULES_TREE_LOOKUP */
 
+static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+
 static void mod_tree_insert(struct module *mod) { }
 static void mod_tree_remove_init(struct module *mod) { }
 static void mod_tree_remove(struct module *mod) { }
@@ -229,6 +250,28 @@ static struct module *mod_find(unsigned long addr)
 
 #endif /* MODULES_TREE_LOOKUP */
 
+/*
+ * Bounds of module text, for speeding up __module_address.
+ * Protected by module_mutex.
+ */
+static void __mod_update_bounds(void *base, unsigned int size)
+{
+	unsigned long min = (unsigned long)base;
+	unsigned long max = min + size;
+
+	if (min < module_addr_min)
+		module_addr_min = min;
+	if (max > module_addr_max)
+		module_addr_max = max;
+}
+
+static void mod_update_bounds(struct module *mod)
+{
+	__mod_update_bounds(mod->module_core, mod->core_size);
+	if (mod->init_size)
+		__mod_update_bounds(mod->module_init, mod->init_size);
+}
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
@@ -297,10 +340,6 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
-/* Bounds of module allocation, for speeding __module_address.
- * Protected by module_mutex. */
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-
 int register_module_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&module_notify_list, nb);
 }
@@ -2539,22 +2578,6 @@ void * __weak module_alloc(unsigned long size)
 	return vmalloc_exec(size);
 }
 
-static void *module_alloc_update_bounds(unsigned long size)
-{
-	void *ret = module_alloc(size);
-
-	if (ret) {
-		mutex_lock(&module_mutex);
-		/* Update module bounds. */
-		if ((unsigned long)ret < module_addr_min)
-			module_addr_min = (unsigned long)ret;
-		if ((unsigned long)ret + size > module_addr_max)
-			module_addr_max = (unsigned long)ret + size;
-		mutex_unlock(&module_mutex);
-	}
-	return ret;
-}
-
 #ifdef CONFIG_DEBUG_KMEMLEAK
 static void kmemleak_load_module(const struct module *mod,
 				 const struct load_info *info)
@@ -2960,7 +2983,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	void *ptr;
 
 	/* Do the allocs. */
-	ptr = module_alloc_update_bounds(mod->core_size);
+	ptr = module_alloc(mod->core_size);
 	/*
 	 * The pointer to this block is stored in the module structure
 	 * which is inside the block. Just mark it as not being a
@@ -2974,7 +2997,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	mod->module_core = ptr;
 
 	if (mod->init_size) {
-		ptr = module_alloc_update_bounds(mod->init_size);
+		ptr = module_alloc(mod->init_size);
 		/*
 		 * The pointer to this block is stored in the module structure
 		 * which is inside the block. This block doesn't need to be
@@ -3344,6 +3367,7 @@ again:
 		err = -EEXIST;
 		goto out;
 	}
+	mod_update_bounds(mod);
 	list_add_rcu(&mod->list, &modules);
 	mod_tree_insert(mod);
 	err = 0;
-- cgit

From 9c27847dda9cfae7c273cde62becf364f9fa9ea3 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:38 +0930
Subject: kernel/params: constify struct kernel_param_ops uses

Most code already uses consts for the struct kernel_param_ops, sweep
the kernel for the last offending stragglers. Other than
include/linux/moduleparam.h and kernel/params.c all other changes were
generated with the following Coccinelle SmPL patch.

Merge conflicts between trees can be handled with Coccinelle.

In the future git could get Coccinelle merge support to deal with
patch --> fail --> grammar --> Coccinelle --> new patch conflicts
automatically for us on patches where the grammar is available and the
patch is of high confidence. Consider this a feature request.

Test compiled on x86_64 against:

	* allnoconfig
	* allmodconfig
	* allyesconfig

@ const_found @
identifier ops;
@@

const struct kernel_param_ops ops = {
};

@ const_not_found depends on !const_found @
identifier ops;
@@

-struct kernel_param_ops ops = {
+const struct kernel_param_ops ops = {
};

Generated-by: Coccinelle SmPL
Cc: Rusty Russell
Cc: Junio C Hamano
Cc: Andrew Morton
Cc: Kees Cook
Cc: Tejun Heo
Cc: Ingo Molnar
Cc: cocci@systeme.lip6.fr
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/params.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/params.c b/kernel/params.c
index a22d6a759b1a..b7635c025e9b 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -251,7 +251,7 @@ char *parse_args(const char *doing,
 		return scnprintf(buffer, PAGE_SIZE, format,	\
 				*((type *)kp->arg));		\
 	}								\
-	struct kernel_param_ops param_ops_##name = {			\
+	const struct kernel_param_ops param_ops_##name = {		\
 		.set = param_set_##name,				\
 		.get = param_get_##name,				\
 	};								\
@@ -303,7 +303,7 @@ static void param_free_charp(void *arg)
 	maybe_kfree_parameter(*((char **)arg));
 }
 
-struct kernel_param_ops param_ops_charp = {
+const struct kernel_param_ops param_ops_charp = {
 	.set = param_set_charp,
 	.get = param_get_charp,
 	.free = param_free_charp,
@@ -328,7 +328,7 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_bool);
 
-struct kernel_param_ops param_ops_bool = {
+const struct kernel_param_ops param_ops_bool = {
 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
 	.set = param_set_bool,
 	.get = param_get_bool,
@@ -356,7 +356,7 @@ int param_get_invbool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_invbool);
 
-struct kernel_param_ops param_ops_invbool = {
+const struct kernel_param_ops param_ops_invbool = {
 	.set = param_set_invbool,
 	.get = param_get_invbool,
 };
@@ -379,7 +379,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_set_bint);
 
-struct kernel_param_ops param_ops_bint = {
+const struct kernel_param_ops param_ops_bint = {
 	.flags = KERNEL_PARAM_OPS_FL_NOARG,
 	.set = param_set_bint,
 	.get = param_get_int,
@@ -476,7 +476,7 @@ static void param_array_free(void *arg)
 			arr->ops->free(arr->elem + arr->elemsize * i);
 }
 
-struct kernel_param_ops param_array_ops = {
+const struct kernel_param_ops param_array_ops = {
 	.set = param_array_set,
 	.get = param_array_get,
 	.free = param_array_free,
@@ -504,7 +504,7 @@ int param_get_string(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_string);
 
-struct kernel_param_ops param_ops_string = {
+const struct kernel_param_ops param_ops_string = {
 	.set = param_set_copystring,
 	.get = param_get_string,
 };
-- cgit

From 05f408dddb013168759cdb4cbd0ba4e189a9504d Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:38 +0930
Subject: kernel/module.c: use generic module param operators for sig_enforce

We're directly checking and modifying sig_enforce when needed instead
of using the generic helpers. This prevents us from generalizing this
helper so that others can use it. Use indirect helpers to allow us to
generalize this code a bit and to make it a bit more clear what this
is doing.

Cc: Rusty Russell
Cc: Jani Nikula
Cc: Andrew Morton
Cc: Kees Cook
Cc: Tejun Heo
Cc: Ingo Molnar
Cc: linux-kernel@vger.kernel.org
Cc: cocci@systeme.lip6.fr
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/module.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index c8da2a59ebf7..9e8c9305bba9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -301,23 +301,25 @@ static bool sig_enforce = false;
 static int param_set_bool_enable_only(const char *val,
 				      const struct kernel_param *kp)
 {
-	int err;
-	bool test;
+	int err = 0;
+	bool new_value;
+	bool orig_value = *(bool *)kp->arg;
 	struct kernel_param dummy_kp = *kp;
 
-	dummy_kp.arg = &test;
+	dummy_kp.arg = &new_value;
 
 	err = param_set_bool(val, &dummy_kp);
 	if (err)
 		return err;
 
 	/* Don't let them unset it once it's set! */
-	if (!test && sig_enforce)
+	if (!new_value && orig_value)
 		return -EROFS;
 
-	if (test)
-		sig_enforce = true;
-	return 0;
+	if (new_value)
+		err = param_set_bool(val, kp);
+
+	return err;
 }
 
 static const struct kernel_param_ops param_ops_bool_enable_only = {
-- cgit

From d19f05d8a8fa221e5d5f4eaca0f3ca5874c990d3 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:38 +0930
Subject: kernel/params.c: generalize bool_enable_only

This takes out the bool_enable_only implementation from the module
loading code and generalizes it so that others can make use of it.

Cc: Rusty Russell
Cc: Jani Nikula
Cc: Andrew Morton
Cc: Kees Cook
Cc: Tejun Heo
Cc: Ingo Molnar
Cc: linux-kernel@vger.kernel.org
Cc: cocci@systeme.lip6.fr
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/module.c | 31 -------------------------------
 kernel/params.c | 30 ++++++++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 31 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 9e8c9305bba9..9b0e36145474 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -298,37 +298,6 @@ static bool sig_enforce = true;
 #else
 static bool sig_enforce = false;
 
-static int param_set_bool_enable_only(const char *val,
-				      const struct kernel_param *kp)
-{
-	int err = 0;
-	bool new_value;
-	bool orig_value = *(bool *)kp->arg;
-	struct kernel_param dummy_kp = *kp;
-
-	dummy_kp.arg = &new_value;
-
-	err = param_set_bool(val, &dummy_kp);
-	if (err)
-		return err;
-
-	/* Don't let them unset it once it's set! */
-	if (!new_value && orig_value)
-		return -EROFS;
-
-	if (new_value)
-		err = param_set_bool(val, kp);
-
-	return err;
-}
-
-static const struct kernel_param_ops param_ops_bool_enable_only = {
-	.flags = KERNEL_PARAM_OPS_FL_NOARG,
-	.set = param_set_bool_enable_only,
-	.get = param_get_bool,
-};
-#define param_check_bool_enable_only param_check_bool
-
 module_param(sig_enforce, bool_enable_only, 0644);
 #endif /* !CONFIG_MODULE_SIG_FORCE */
 #endif /* CONFIG_MODULE_SIG */
diff --git a/kernel/params.c b/kernel/params.c
index b7635c025e9b..324624ed620f 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -335,6 +335,36 @@ const struct kernel_param_ops param_ops_bool = {
 };
 EXPORT_SYMBOL(param_ops_bool);
 
+int param_set_bool_enable_only(const char *val, const struct kernel_param *kp)
+{
+	int err = 0;
+	bool new_value;
+	bool orig_value = *(bool *)kp->arg;
+	struct kernel_param dummy_kp = *kp;
+
+	dummy_kp.arg = &new_value;
+
+	err = param_set_bool(val, &dummy_kp);
+	if (err)
+		return err;
+
+	/* Don't let them unset it once it's set! */
+	if (!new_value && orig_value)
+		return -EROFS;
+
+	if (new_value)
+		err = param_set_bool(val, kp);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(param_set_bool_enable_only);
+
+const struct kernel_param_ops param_ops_bool_enable_only = {
+	.flags = KERNEL_PARAM_OPS_FL_NOARG,
+	.set = param_set_bool_enable_only,
+	.get = param_get_bool,
+};
+
 /* This one must be bool. */
 int param_set_invbool(const char *val, const struct kernel_param *kp)
 {
-- cgit

From 154be21c582857c468575e7cab488fe39dc1445b Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:39 +0930
Subject: kernel/params.c: export param_ops_bool_enable_only

This will grant access to this helper to code built as modules.

Cc: Rusty Russell
Cc: David Howells
Cc: Ming Lei
Cc: Seth Forshee
Cc: Kyle McMartin
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/params.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel')

diff --git a/kernel/params.c b/kernel/params.c
index 324624ed620f..7edf31f2ce96 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -364,6 +364,7 @@ const struct kernel_param_ops param_ops_bool_enable_only = {
 	.set = param_set_bool_enable_only,
 	.get = param_get_bool,
 };
+EXPORT_SYMBOL_GPL(param_ops_bool_enable_only);
 
 /* This one must be bool. */
 int param_set_invbool(const char *val, const struct kernel_param *kp)
-- cgit

From 552f530cbc34072d824af021e3289fdd195c880d Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:39 +0930
Subject: kernel/workqueue.c: remove ifdefs over wq_power_efficient

We can avoid an ifdef over wq_power_efficient's declaration by just
using IS_ENABLED().

Cc: Rusty Russell
Cc: Jani Nikula
Cc: Andrew Morton
Cc: Kees Cook
Cc: Tejun Heo
Cc: Ingo Molnar
Cc: linux-kernel@vger.kernel.org
Cc: cocci@systeme.lip6.fr
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/workqueue.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 586ad91300b0..59bc24918655 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -280,12 +280,7 @@ static bool wq_disable_numa;
 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 
 /* see the comment above the definition of WQ_POWER_EFFICIENT */
-#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
-static bool wq_power_efficient = true;
-#else
-static bool wq_power_efficient;
-#endif
-
+static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
-- cgit

From 6727bb9c6abe836d88191ce13bfdd7a53c245e15 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Wed, 27 May 2015 11:09:39 +0930
Subject: kernel/module.c: avoid ifdefs for sig_enforce declaration

There's no need to require an ifdef over the declaration of
sig_enforce as IS_ENABLED() can be used. While at it, there's no harm
in exposing this kernel parameter outside of CONFIG_MODULE_SIG as it'd
be a no-op on non module sig kernels.

Now, technically we should in theory be able to remove the #ifdef'ery
over the declaration of the module parameter as we are also trusting
the bool_enable_only code for CONFIG_MODULE_SIG kernels but for now
remain paranoid and keep it. With time if no one can put a bullet
through bool_enable_only and if there are no technical requirements
over not exposing CONFIG_MODULE_SIG_FORCE with the measures in place
by bool_enable_only we could remove this last ifdef.
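For reference, what makes the single declaration below equivalent to
the removed #ifdef pair is that IS_ENABLED() collapses to a constant
at build time (simplified description, not the exact macro from
include/linux/kconfig.h):

	/*
	 * IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO is set
	 * (=y, and for tristates also =m), and to 0 otherwise, so the
	 * initializer below is a compile-time constant true/false.
	 */
	static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);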
Cc: Rusty Russell
Cc: Andrew Morton
Cc: Kees Cook
Cc: Tejun Heo
Cc: Ingo Molnar
Cc: linux-kernel@vger.kernel.org
Cc: cocci@systeme.lip6.fr
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Rusty Russell
---
 kernel/module.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 9b0e36145474..427b99f1a4b3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -292,15 +292,10 @@ static void module_assert_mutex_or_preempt(void)
 #endif
 }
 
-#ifdef CONFIG_MODULE_SIG
-#ifdef CONFIG_MODULE_SIG_FORCE
-static bool sig_enforce = true;
-#else
-static bool sig_enforce = false;
-
+static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
+#ifndef CONFIG_MODULE_SIG_FORCE
 module_param(sig_enforce, bool_enable_only, 0644);
 #endif /* !CONFIG_MODULE_SIG_FORCE */
-#endif /* CONFIG_MODULE_SIG */
 
 /* Block module loading/unloading? */
 int modules_disabled = 0;
-- cgit

From 74c3dea355245c17ee407a3ce3ea34f55b40f2eb Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Wed, 17 Jun 2015 06:16:52 +0930
Subject: params: suppress unused variable error, warn once just in case code
 changes.

It shouldn't fail due to OOM (it's boot time), and already warns if we
get two identical names.

But you never know what the future holds, and WARN_ON_ONCE() keeps gcc
happy with minimal code.

Reported-by: Louis Langholtz
Acked-by: Tejun Heo
Signed-off-by: Rusty Russell
---
 kernel/params.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel')

diff --git a/kernel/params.c b/kernel/params.c
index 7edf31f2ce96..e906874da5fc 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -884,6 +884,7 @@ static void __init version_sysfs_builtin(void)
 		mk = locate_module_kobject(vattr->module_name);
 		if (mk) {
 			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+			WARN_ON_ONCE(err);
 			kobject_uevent(&mk->kobj, KOBJ_ADD);
 			kobject_put(&mk->kobj);
 		}
-- cgit

From 5104b7d7678b0029417f6ac08243773a77259ac6 Mon Sep 17 00:00:00 2001
From: Dan Streetman
Date: Wed, 17 Jun 2015 06:17:52 +0930
Subject: module: make perm const

Change the struct kernel_param.perm field to a const, as it should
never be changed.

Signed-off-by: Dan Streetman
Signed-off-by: Rusty Russell (cut from larger patch)
---
 kernel/params.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/params.c b/kernel/params.c
index e906874da5fc..a8b09f6c87dc 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -395,12 +395,11 @@ EXPORT_SYMBOL(param_ops_invbool);
 
 int param_set_bint(const char *val, const struct kernel_param *kp)
 {
-	struct kernel_param boolkp;
+	/* Match bool exactly, by re-using it. */
+	struct kernel_param boolkp = *kp;
 	bool v;
 	int ret;
 
-	/* Match bool exactly, by re-using it. */
-	boolkp = *kp;
 	boolkp.arg = &v;
 
 	ret = param_set_bool(val, &boolkp);
@@ -480,9 +479,8 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
 {
 	int i, off, ret;
 	const struct kparam_array *arr = kp->arr;
-	struct kernel_param p;
+	struct kernel_param p = *kp;
 
-	p = *kp;
 	for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
 		if (i)
 			buffer[off++] = ',';
-- cgit

From b51d23e4e9fea6f264d39535c2a62d1f51e7ccc3 Mon Sep 17 00:00:00 2001
From: Dan Streetman
Date: Wed, 17 Jun 2015 06:18:52 +0930
Subject: module: add per-module param_lock

Add a "param_lock" mutex to each module, and update params.c to use
the correct built-in or module mutex while locking kernel params.
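The mutex member itself lands in struct module in
include/linux/module.h, which these kernel/-limited diffs do not show;
in sketch form (field placement approximate):

	struct module {
		/* ... existing fields ... */

		/* Protects this module's own kernel params (sketch). */
		struct mutex param_lock;

		/* ... existing fields ... */
	};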
Remove the kparam_block_sysfs_r/w() macros, replace them with direct
calls to kernel_param_[un]lock(module).

The kernel param code currently uses a single mutex to protect
modification of any and all kernel params. While this generally works,
there is one specific problem with it; a module callback function
cannot safely load another module, i.e. with request_module() or even
with indirect calls such as crypto_has_alg(). If the module to be
loaded has any of its params configured (e.g. with a /etc/modprobe.d/*
config file), then the attempt will result in a deadlock between the
first module param callback waiting for modprobe, and modprobe trying
to lock the single kernel param mutex to set the new module's param.

This fixes that by using per-module mutexes, so that each individual
module is protected against concurrent changes in its own kernel
params, but is not blocked by changes to other module params. All
built-in modules continue to use the built-in mutex, since they will
always be loaded at runtime and references (e.g. request_module(),
crypto_has_alg()) to them will never cause load-time param changing.

This also simplifies the interface used by modules to block sysfs
access to their params; while there are currently functions to block
and unblock sysfs param access which are split up by read and write
and expect a single kernel param to be passed, their actual operation
is identical and applies to all params, not just the one passed to
them; they simply lock and unlock the global param mutex. They are
replaced with direct calls to kernel_param_[un]lock(THIS_MODULE),
which locks THIS_MODULE's param_lock, or if the module is built-in,
it locks the built-in mutex.

Suggested-by: Rusty Russell
Signed-off-by: Dan Streetman
Signed-off-by: Rusty Russell
---
 kernel/module.c |  2 ++
 kernel/params.c | 50 +++++++++++++++++++++++++++++++-------------------
 2 files changed, 33 insertions(+), 19 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 427b99f1a4b3..8ec33ce202a6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3442,6 +3442,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	if (err)
 		goto unlink_mod;
 
+	mutex_init(&mod->param_lock);
+
 	/* Now we've got everything in the final locations, we can
 	 * find optional sections. */
 	err = find_module_sections(mod, info);
diff --git a/kernel/params.c b/kernel/params.c
index a8b09f6c87dc..8890d0b8dffc 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -25,15 +25,20 @@
 #include
 #include
 
-/* Protects all parameters, and incidentally kmalloced_param list. */
+/* Protects all built-in parameters, modules use their own param_lock */
 static DEFINE_MUTEX(param_lock);
 
+/* Use the module's mutex, or if built-in use the built-in mutex */
+#define KPARAM_MUTEX(mod)	((mod) ? &(mod)->param_lock : &param_lock)
+#define KPARAM_IS_LOCKED(mod)	mutex_is_locked(KPARAM_MUTEX(mod))
+
 /* This just allows us to keep track of which parameters are kmalloced. */
 struct kmalloced_param {
 	struct list_head list;
 	char val[];
 };
 static LIST_HEAD(kmalloced_params);
+static DEFINE_SPINLOCK(kmalloced_params_lock);
 
 static void *kmalloc_parameter(unsigned int size)
 {
@@ -43,7 +48,10 @@ static void *kmalloc_parameter(unsigned int size)
 	if (!p)
 		return NULL;
 
+	spin_lock(&kmalloced_params_lock);
 	list_add(&p->list, &kmalloced_params);
+	spin_unlock(&kmalloced_params_lock);
+
 	return p->val;
 }
 
@@ -52,6 +60,7 @@ static void maybe_kfree_parameter(void *param)
 {
 	struct kmalloced_param *p;
 
+	spin_lock(&kmalloced_params_lock);
 	list_for_each_entry(p, &kmalloced_params, list) {
 		if (p->val == param) {
 			list_del(&p->list);
@@ -59,6 +68,7 @@ static void maybe_kfree_parameter(void *param)
 			break;
 		}
 	}
+	spin_unlock(&kmalloced_params_lock);
 }
 
 static char dash2underscore(char c)
@@ -118,10 +128,10 @@ static int parse_one(char *param,
 				return -EINVAL;
 			pr_debug("handling %s with %p\n", param,
 				params[i].ops->set);
-			mutex_lock(&param_lock);
+			kernel_param_lock(params[i].mod);
 			param_check_unsafe(&params[i]);
 			err = params[i].ops->set(val, &params[i]);
-			mutex_unlock(&param_lock);
+			kernel_param_unlock(params[i].mod);
 			return err;
 		}
 	}
@@ -417,7 +427,8 @@ const struct kernel_param_ops param_ops_bint = {
 EXPORT_SYMBOL(param_ops_bint);
 
 /* We break the rule and mangle the string. */
-static int param_array(const char *name,
+static int param_array(struct module *mod,
+		       const char *name,
 		       const char *val,
 		       unsigned int min, unsigned int max,
 		       void *elem, int elemsize,
@@ -448,7 +459,7 @@ static int param_array(const char *name,
 			/* nul-terminate and parse */
 			save = val[len];
 			((char *)val)[len] = '\0';
-			BUG_ON(!mutex_is_locked(&param_lock));
+			BUG_ON(!KPARAM_IS_LOCKED(mod));
 			ret = set(val, &kp);
 
 			if (ret != 0)
@@ -470,7 +481,7 @@ static int param_array_set(const char *val, const struct kernel_param *kp)
 	const struct kparam_array *arr = kp->arr;
 	unsigned int temp_num;
 
-	return param_array(kp->name, val, 1, arr->max, arr->elem,
+	return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem,
 			   arr->elemsize, arr->ops->set, kp->level,
 			   arr->num ?: &temp_num);
 }
@@ -485,7 +496,7 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
 		if (i)
 			buffer[off++] = ',';
 		p.arg = arr->elem + arr->elemsize * i;
-		BUG_ON(!mutex_is_locked(&param_lock));
+		BUG_ON(!KPARAM_IS_LOCKED(p.mod));
 		ret = arr->ops->get(buffer + off, &p);
 		if (ret < 0)
 			return ret;
@@ -568,9 +579,9 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
 	if (!attribute->param->ops->get)
 		return -EPERM;
 
-	mutex_lock(&param_lock);
+	kernel_param_lock(mk->mod);
 	count = attribute->param->ops->get(buf, attribute->param);
-	mutex_unlock(&param_lock);
+	kernel_param_unlock(mk->mod);
 	if (count > 0) {
 		strcat(buf, "\n");
 		++count;
 	}
@@ -580,7 +591,7 @@
 
 /* sysfs always hands a nul-terminated string in buf. We rely on that. */
 static ssize_t param_attr_store(struct module_attribute *mattr,
-				struct module_kobject *km,
+				struct module_kobject *mk,
 				const char *buf, size_t len)
 {
 	int err;
@@ -589,10 +600,10 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
 	if (!attribute->param->ops->set)
 		return -EPERM;
 
-	mutex_lock(&param_lock);
+	kernel_param_lock(mk->mod);
 	param_check_unsafe(attribute->param);
 	err = attribute->param->ops->set(buf, attribute->param);
-	mutex_unlock(&param_lock);
+	kernel_param_unlock(mk->mod);
 	if (!err)
 		return len;
 	return err;
@@ -605,18 +616,19 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
 #define __modinit __init
 #endif
 
-#ifdef CONFIG_SYSFS
-void __kernel_param_lock(void)
+void kernel_param_lock(struct module *mod)
 {
-	mutex_lock(&param_lock);
+	mutex_lock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_lock);
 
-void __kernel_param_unlock(void)
+void kernel_param_unlock(struct module *mod)
 {
-	mutex_unlock(&param_lock);
+	mutex_unlock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_unlock);
+
+#ifdef CONFIG_SYSFS
+EXPORT_SYMBOL(kernel_param_lock);
+EXPORT_SYMBOL(kernel_param_unlock);
 
 /*
  * add_sysfs_param - add a parameter to sysfs
-- cgit

From cf2fde7b39e9446e2af015215d7fb695781af0c1 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Fri, 26 Jun 2015 06:44:38 +0930
Subject: param: fix module param locks when !CONFIG_SYSFS.

As Dan Streetman points out, the entire point of the locking is to
stop sysfs accesses, so they're elided entirely in the !SYSFS case.

Reported-by: Stephen Rothwell
Signed-off-by: Rusty Russell
---
 kernel/module.c |  9 ++++++++-
 kernel/params.c | 18 ++++++++++++++----
 2 files changed, 22 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/module.c b/kernel/module.c
index 8ec33ce202a6..b4994adf7187 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1820,6 +1820,10 @@ static void mod_sysfs_fini(struct module *mod)
 	mod_kobject_put(mod);
 }
 
+static void init_param_lock(struct module *mod)
+{
+	mutex_init(&mod->param_lock);
+}
 #else /* !CONFIG_SYSFS */
 
 static int mod_sysfs_setup(struct module *mod,
@@ -1842,6 +1846,9 @@ static void del_usage_links(struct module *mod)
 {
 }
 
+static void init_param_lock(struct module *mod)
+{
+}
 #endif /* CONFIG_SYSFS */
 
 static void mod_sysfs_teardown(struct module *mod)
@@ -3442,7 +3449,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	if (err)
 		goto unlink_mod;
 
-	mutex_init(&mod->param_lock);
+	init_param_lock(mod);
 
 	/* Now we've got everything in the final locations, we can
 	 * find optional sections. */
 	err = find_module_sections(mod, info);
diff --git a/kernel/params.c b/kernel/params.c
index 8890d0b8dffc..faa461c16f12 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -25,12 +25,22 @@
 #include
 #include
 
+#ifdef CONFIG_SYSFS
 /* Protects all built-in parameters, modules use their own param_lock */
 static DEFINE_MUTEX(param_lock);
 
 /* Use the module's mutex, or if built-in use the built-in mutex */
 #define KPARAM_MUTEX(mod)	((mod) ? &(mod)->param_lock : &param_lock)
-#define KPARAM_IS_LOCKED(mod)	mutex_is_locked(KPARAM_MUTEX(mod))
+
+static inline void check_kparam_locked(struct module *mod)
+{
+	BUG_ON(!mutex_is_locked(KPARAM_MUTEX(mod)));
+}
+#else
+static inline void check_kparam_locked(struct module *mod)
+{
+}
+#endif /* !CONFIG_SYSFS */
 
 /* This just allows us to keep track of which parameters are kmalloced. */
 struct kmalloced_param {
@@ -459,7 +469,7 @@ static int param_array(struct module *mod,
 			/* nul-terminate and parse */
 			save = val[len];
 			((char *)val)[len] = '\0';
-			BUG_ON(!KPARAM_IS_LOCKED(mod));
+			check_kparam_locked(mod);
 			ret = set(val, &kp);
 
 			if (ret != 0)
@@ -496,7 +506,7 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
 		if (i)
 			buffer[off++] = ',';
 		p.arg = arr->elem + arr->elemsize * i;
-		BUG_ON(!KPARAM_IS_LOCKED(p.mod));
+		check_kparam_locked(p.mod);
 		ret = arr->ops->get(buffer + off, &p);
 		if (ret < 0)
 			return ret;
@@ -616,6 +626,7 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
 #define __modinit __init
 #endif
 
+#ifdef CONFIG_SYSFS
 void kernel_param_lock(struct module *mod)
 {
 	mutex_lock(KPARAM_MUTEX(mod));
@@ -626,7 +637,6 @@ void kernel_param_unlock(struct module *mod)
 {
 	mutex_unlock(KPARAM_MUTEX(mod));
 }
-
-#ifdef CONFIG_SYSFS
 EXPORT_SYMBOL(kernel_param_lock);
 EXPORT_SYMBOL(kernel_param_unlock);
-- cgit

From 20bdc2cfdbc484777b30b96fcdbb8994038f3ce1 Mon Sep 17 00:00:00 2001
From: Stephen Rothwell
Date: Fri, 26 Jun 2015 13:19:19 +1000
Subject: modules: only use mod->param_lock if CONFIG_MODULES

Signed-off-by: Stephen Rothwell
Signed-off-by: Rusty Russell
---
 kernel/params.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'kernel')

diff --git a/kernel/params.c b/kernel/params.c
index faa461c16f12..adc0bbc06cc5 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -30,7 +30,11 @@ static DEFINE_MUTEX(param_lock);
 
 /* Use the module's mutex, or if built-in use the built-in mutex */
+#ifdef CONFIG_MODULES
 #define KPARAM_MUTEX(mod)	((mod) ? &(mod)->param_lock : &param_lock)
+#else
+#define KPARAM_MUTEX(mod)	(&param_lock)
+#endif
 
 static inline void check_kparam_locked(struct module *mod)
 {
-- cgit
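Taken together, the series leaves module authors with a simple locking
interface for their own parameters. A hypothetical usage sketch (the
parameter name and function are invented for illustration):

	static int debug_level;
	module_param(debug_level, int, 0644);

	static void report_state(void)
	{
		int level;

		/*
		 * Serializes only against sysfs writes to this module's
		 * parameters; other modules' params and the built-in
		 * param mutex are not involved.
		 */
		kernel_param_lock(THIS_MODULE);
		level = debug_level;
		kernel_param_unlock(THIS_MODULE);

		pr_info("debug_level=%d\n", level);
	}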