Diffstat (limited to 'kernel/kallsyms.c')
-rw-r--r-- | kernel/kallsyms.c | 463
1 file changed, 309 insertions, 154 deletions
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fe9de067771c..a9a0ca605d4a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -25,30 +25,14 @@
 #include <linux/filter.h>
 #include <linux/ftrace.h>
 #include <linux/kprobes.h>
+#include <linux/build_bug.h>
 #include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bsearch.h>
+#include <linux/btf_ids.h>
 
-/*
- * These will be re-linked against their real values
- * during the second link stage.
- */
-extern const unsigned long kallsyms_addresses[] __weak;
-extern const int kallsyms_offsets[] __weak;
-extern const u8 kallsyms_names[] __weak;
-
-/*
- * Tell the compiler that the count isn't in the small data section if the arch
- * has one (eg: FRV).
- */
-extern const unsigned int kallsyms_num_syms
-__section(".rodata") __attribute__((weak));
-
-extern const unsigned long kallsyms_relative_base
-__section(".rodata") __attribute__((weak));
-
-extern const char kallsyms_token_table[] __weak;
-extern const u16 kallsyms_token_index[] __weak;
-
-extern const unsigned int kallsyms_markers[] __weak;
+#include "kallsyms_internal.h"
 
 /*
  * Expand a compressed symbol data into the resulting uncompressed string,
@@ -66,12 +50,20 @@ static unsigned int kallsyms_expand_symbol(unsigned int off,
 	data = &kallsyms_names[off];
 	len = *data;
 	data++;
+	off++;
+
+	/* If MSB is 1, it is a "big" symbol, so needs an additional byte. */
+	if ((len & 0x80) != 0) {
+		len = (len & 0x7F) | (*data << 7);
+		data++;
+		off++;
+	}
 
 	/*
 	 * Update the offset to return the offset for the next symbol on
 	 * the compressed stream.
 	 */
-	off += len + 1;
+	off += len;
 
 	/*
 	 * For every byte on the compressed symbol data, copy the table
@@ -124,7 +116,7 @@ static char kallsyms_get_symbol_type(unsigned int off)
 static unsigned int get_symbol_offset(unsigned long pos)
 {
 	const u8 *name;
-	int i;
+	int i, len;
 
 	/*
 	 * Use the closest marker we have. We have markers every 256 positions,
@@ -138,17 +130,24 @@ static unsigned int get_symbol_offset(unsigned long pos)
 	 * so we just need to add the len to the current pointer for every
 	 * symbol we wish to skip.
 	 */
-	for (i = 0; i < (pos & 0xFF); i++)
-		name = name + (*name) + 1;
+	for (i = 0; i < (pos & 0xFF); i++) {
+		len = *name;
+
+		/*
+		 * If MSB is 1, it is a "big" symbol, so we need to look into
+		 * the next byte (and skip it, too).
+		 */
+		if ((len & 0x80) != 0)
+			len = ((len & 0x7F) | (name[1] << 7)) + 1;
+
+		name = name + len + 1;
+	}
 
 	return name - kallsyms_names;
 }
 
-static unsigned long kallsyms_sym_address(int idx)
+unsigned long kallsyms_sym_address(int idx)
 {
-	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
-		return kallsyms_addresses[idx];
-
 	/* values are unsigned offsets if --absolute-percpu is not in effect */
 	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
 		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];
@@ -161,24 +160,94 @@ static unsigned long kallsyms_sym_address(int idx)
 	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
 }
 
-/* Lookup the address for this symbol. Returns 0 if not found. */
-unsigned long kallsyms_lookup_name(const char *name)
+static unsigned int get_symbol_seq(int index)
 {
+	unsigned int i, seq = 0;
+
+	for (i = 0; i < 3; i++)
+		seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];
+
+	return seq;
+}
+
+static int kallsyms_lookup_names(const char *name,
+				 unsigned int *start,
+				 unsigned int *end)
+{
+	int ret;
+	int low, mid, high;
+	unsigned int seq, off;
 	char namebuf[KSYM_NAME_LEN];
-	unsigned long i;
-	unsigned int off;
 
-	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
-		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+	low = 0;
+	high = kallsyms_num_syms - 1;
+
+	while (low <= high) {
+		mid = low + (high - low) / 2;
+		seq = get_symbol_seq(mid);
+		off = get_symbol_offset(seq);
+		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+		ret = strcmp(name, namebuf);
+		if (ret > 0)
+			low = mid + 1;
+		else if (ret < 0)
+			high = mid - 1;
+		else
+			break;
+	}
+
+	if (low > high)
+		return -ESRCH;
 
-		if (strcmp(namebuf, name) == 0)
-			return kallsyms_sym_address(i);
+	low = mid;
+	while (low) {
+		seq = get_symbol_seq(low - 1);
+		off = get_symbol_offset(seq);
+		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+		if (strcmp(name, namebuf))
+			break;
+		low--;
 	}
+	*start = low;
+
+	if (end) {
+		high = mid;
+		while (high < kallsyms_num_syms - 1) {
+			seq = get_symbol_seq(high + 1);
+			off = get_symbol_offset(seq);
+			kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+			if (strcmp(name, namebuf))
+				break;
+			high++;
+		}
+		*end = high;
+	}
+
+	return 0;
+}
+
+/* Lookup the address for this symbol. Returns 0 if not found. */
+unsigned long kallsyms_lookup_name(const char *name)
+{
+	int ret;
+	unsigned int i;
+
+	/* Skip the search for empty string. */
+	if (!*name)
+		return 0;
+
+	ret = kallsyms_lookup_names(name, &i, NULL);
+	if (!ret)
+		return kallsyms_sym_address(get_symbol_seq(i));
+
 	return module_kallsyms_lookup_name(name);
 }
 
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
-				      unsigned long),
+/*
+ * Iterate over all symbols in vmlinux. For symbols from modules use
+ * module_kallsyms_on_each_symbol instead.
+ */
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
 			    void *data)
 {
 	char namebuf[KSYM_NAME_LEN];
@@ -188,11 +257,30 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
 
 	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
 		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
-		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
+		ret = fn(data, namebuf, kallsyms_sym_address(i));
 		if (ret != 0)
 			return ret;
+		cond_resched();
 	}
-	return module_kallsyms_on_each_symbol(fn, data);
+	return 0;
+}
+
+int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+				  const char *name, void *data)
+{
+	int ret;
+	unsigned int i, start, end;
+
+	ret = kallsyms_lookup_names(name, &start, &end);
+	if (ret)
+		return 0;
+
+	for (i = start; !ret && i <= end; i++) {
+		ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
+		cond_resched();
+	}
+
+	return ret;
 }
 
 static unsigned long get_symbol_pos(unsigned long addr,
@@ -202,13 +290,7 @@
 	unsigned long symbol_start = 0, symbol_end = 0;
 	unsigned long i, low, high, mid;
 
-	/* This kernel should never had been booted. */
-	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
-		BUG_ON(!kallsyms_addresses);
-	else
-		BUG_ON(!kallsyms_offsets);
-
-	/* Do a binary search on the sorted kallsyms_addresses array. */
+	/* Do a binary search on the sorted kallsyms_offsets array. */
 	low = 0;
 	high = kallsyms_num_syms;
 
@@ -267,23 +349,16 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 		get_symbol_pos(addr, symbolsize, offset);
 		return 1;
 	}
-	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+	return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||
 	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
 
-/*
- * Lookup an address
- * - modname is set to NULL if it's in the kernel.
- * - We guarantee that the returned name is valid until we reschedule even if.
- *   It resides in a module.
- * - We also guarantee that modname will be valid until rescheduled.
- */
-const char *kallsyms_lookup(unsigned long addr,
-			    unsigned long *symbolsize,
-			    unsigned long *offset,
-			    char **modname, char *namebuf)
+static int kallsyms_lookup_buildid(unsigned long addr,
+			unsigned long *symbolsize,
+			unsigned long *offset, char **modname,
+			const unsigned char **modbuildid, char *namebuf)
 {
-	const char *ret;
+	int ret;
 
 	namebuf[KSYM_NAME_LEN - 1] = 0;
 	namebuf[0] = 0;
@@ -297,12 +372,15 @@ const char *kallsyms_lookup(unsigned long addr,
 				       namebuf, KSYM_NAME_LEN);
 		if (modname)
 			*modname = NULL;
-		return namebuf;
+		if (modbuildid)
+			*modbuildid = NULL;
+
+		return strlen(namebuf);
 	}
 
 	/* See if it's in a module or a BPF JITed image. */
 	ret = module_address_lookup(addr, symbolsize, offset,
-				    modname, namebuf);
+				    modname, modbuildid, namebuf);
 	if (!ret)
 		ret = bpf_address_lookup(addr, symbolsize, offset,
 					 modname, namebuf);
@@ -310,9 +388,31 @@ const char *kallsyms_lookup(unsigned long addr,
 	if (!ret)
 		ret = ftrace_mod_address_lookup(addr, symbolsize, offset,
 						modname, namebuf);
+
 	return ret;
 }
 
+/*
+ * Lookup an address
+ * - modname is set to NULL if it's in the kernel.
+ * - We guarantee that the returned name is valid until we reschedule even if.
+ *   It resides in a module.
+ * - We also guarantee that modname will be valid until rescheduled.
+ */
+const char *kallsyms_lookup(unsigned long addr,
+			    unsigned long *symbolsize,
+			    unsigned long *offset,
+			    char **modname, char *namebuf)
+{
+	int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
+					  NULL, namebuf);
+
+	if (!ret)
+		return NULL;
+
+	return namebuf;
+}
+
 int lookup_symbol_name(unsigned long addr, char *symname)
 {
 	symname[0] = '\0';
@@ -331,50 +431,39 @@ int lookup_symbol_name(unsigned long addr, char *symname)
 	return lookup_module_symbol_name(addr, symname);
 }
 
-int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
-			unsigned long *offset, char *modname, char *name)
-{
-	name[0] = '\0';
-	name[KSYM_NAME_LEN - 1] = '\0';
-
-	if (is_ksym_addr(addr)) {
-		unsigned long pos;
-
-		pos = get_symbol_pos(addr, size, offset);
-		/* Grab name */
-		kallsyms_expand_symbol(get_symbol_offset(pos),
-				       name, KSYM_NAME_LEN);
-		modname[0] = '\0';
-		return 0;
-	}
-	/* See if it's in a module. */
-	return lookup_module_symbol_attrs(addr, size, offset, modname, name);
-}
-
 /* Look up a kernel symbol and return it in a text buffer. */
 static int __sprint_symbol(char *buffer, unsigned long address,
-			   int symbol_offset, int add_offset)
+			   int symbol_offset, int add_offset, int add_buildid)
 {
 	char *modname;
-	const char *name;
+	const unsigned char *buildid;
 	unsigned long offset, size;
 	int len;
 
 	address += symbol_offset;
-	name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
-	if (!name)
+	len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
+				      buffer);
+	if (!len)
 		return sprintf(buffer, "0x%lx", address - symbol_offset);
 
-	if (name != buffer)
-		strcpy(buffer, name);
-	len = strlen(buffer);
-
 	offset -= symbol_offset;
 
 	if (add_offset)
 		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
 
-	if (modname)
-		len += sprintf(buffer + len, " [%s]", modname);
+	if (modname) {
+		len += sprintf(buffer + len, " [%s", modname);
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+		if (add_buildid && buildid) {
+			/* build ID should match length of sprintf */
+#if IS_ENABLED(CONFIG_MODULES)
+			static_assert(sizeof(typeof_member(struct module, build_id)) == 20);
+#endif
+			len += sprintf(buffer + len, " %20phN", buildid);
+		}
+#endif
+		len += sprintf(buffer + len, "]");
+	}
 
 	return len;
 }
@@ -392,11 +481,28 @@ static int __sprint_symbol(char *buffer, unsigned long address,
  */
 int sprint_symbol(char *buffer, unsigned long address)
 {
-	return __sprint_symbol(buffer, address, 0, 1);
+	return __sprint_symbol(buffer, address, 0, 1, 0);
 }
 EXPORT_SYMBOL_GPL(sprint_symbol);
 
 /**
+ * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name,
+ * offset, size, module name and module build ID to @buffer if possible. If no
+ * symbol was found, just saves its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+	return __sprint_symbol(buffer, address, 0, 1, 1);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_build_id);
+
+/**
  * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
@@ -409,7 +515,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
  */
 int sprint_symbol_no_offset(char *buffer, unsigned long address)
 {
-	return __sprint_symbol(buffer, address, 0, 0);
+	return __sprint_symbol(buffer, address, 0, 0, 0);
 }
 EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
 
@@ -429,13 +535,32 @@
  */
 int sprint_backtrace(char *buffer, unsigned long address)
 {
-	return __sprint_symbol(buffer, address, -1, 1);
+	return __sprint_symbol(buffer, address, -1, 1, 0);
+}
+
+/**
+ * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function is for stack backtrace and does the same thing as
+ * sprint_symbol() but with modified/decreased @address. If there is a
+ * tail-call to the function marked "noreturn", gcc optimized out code after
+ * the call so that the stack-saved return address could point outside of the
+ * caller. This function ensures that kallsyms will find the original caller
+ * by decreasing @address. This function also appends the module build ID to
+ * the @buffer if @address is within a kernel module.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_backtrace_build_id(char *buffer, unsigned long address)
+{
+	return __sprint_symbol(buffer, address, -1, 1, 1);
 }
 
 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
 struct kallsym_iter {
 	loff_t pos;
-	loff_t pos_arch_end;
 	loff_t pos_mod_end;
 	loff_t pos_ftrace_mod_end;
 	loff_t pos_bpf_end;
@@ -448,29 +573,9 @@ struct kallsym_iter {
 	int show_value;
 };
 
-int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
-			    char *type, char *name)
-{
-	return -EINVAL;
-}
-
-static int get_ksymbol_arch(struct kallsym_iter *iter)
-{
-	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
-				   &iter->value, &iter->type,
-				   iter->name);
-
-	if (ret < 0) {
-		iter->pos_arch_end = iter->pos;
-		return 0;
-	}
-
-	return 1;
-}
-
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
-	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
+	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
 				     &iter->value, &iter->type,
 				     iter->name, iter->module_name,
 				     &iter->exported);
@@ -505,7 +610,7 @@ static int get_ksymbol_bpf(struct kallsym_iter *iter)
 {
 	int ret;
 
-	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
+	strscpy(iter->module_name, "bpf", MODULE_NAME_LEN);
 	iter->exported = 0;
 	ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
 			      &iter->value, &iter->type,
@@ -525,7 +630,7 @@ static int get_ksymbol_bpf(struct kallsym_iter *iter)
  */
 static int get_ksymbol_kprobe(struct kallsym_iter *iter)
 {
-	strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
+	strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
 	iter->exported = 0;
 	return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
 				  &iter->value, &iter->type,
@@ -553,7 +658,6 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 	iter->nameoff = get_symbol_offset(new_pos);
 	iter->pos = new_pos;
 	if (new_pos == 0) {
-		iter->pos_arch_end = 0;
 		iter->pos_mod_end = 0;
 		iter->pos_ftrace_mod_end = 0;
 		iter->pos_bpf_end = 0;
@@ -569,10 +673,6 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 {
 	iter->pos = pos;
 
-	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
-	    get_ksymbol_arch(iter))
-		return 1;
-
 	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
 	    get_ksymbol_mod(iter))
 		return 1;
@@ -660,41 +760,96 @@ static const struct seq_operations kallsyms_op = {
 	.show = s_show
 };
 
-static inline int kallsyms_for_perf(void)
+#ifdef CONFIG_BPF_SYSCALL
+
+struct bpf_iter__ksym {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct kallsym_iter *, ksym);
+};
+
+static int ksym_prog_seq_show(struct seq_file *m, bool in_stop)
 {
-#ifdef CONFIG_PERF_EVENTS
-	extern int sysctl_perf_event_paranoid;
-	if (sysctl_perf_event_paranoid <= 1)
-		return 1;
-#endif
+	struct bpf_iter__ksym ctx;
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+
+	meta.seq = m;
+	prog = bpf_iter_get_info(&meta, in_stop);
+	if (!prog)
+		return 0;
+
+	ctx.meta = &meta;
+	ctx.ksym = m ? m->private : NULL;
+	return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p)
+{
+	return ksym_prog_seq_show(m, false);
+}
+
+static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p)
+{
+	if (!p)
+		(void) ksym_prog_seq_show(m, true);
+	else
+		s_stop(m, p);
+}
+
+static const struct seq_operations bpf_iter_ksym_ops = {
+	.start = s_start,
+	.next = s_next,
+	.stop = bpf_iter_ksym_seq_stop,
+	.show = bpf_iter_ksym_seq_show,
+};
+
+static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux)
+{
+	struct kallsym_iter *iter = priv_data;
+
+	reset_iter(iter, 0);
+
+	/* cache here as in kallsyms_open() case; use current process
+	 * credentials to tell BPF iterators if values should be shown.
+	 */
+	iter->show_value = kallsyms_show_value(current_cred());
+
 	return 0;
 }
 
-/*
- * We show kallsyms information even to normal users if we've enabled
- * kernel profiling and are explicitly not paranoid (so kptr_restrict
- * is clear, and sysctl_perf_event_paranoid isn't set).
- *
- * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
- * block even that).
- */
-bool kallsyms_show_value(const struct cred *cred)
-{
-	switch (kptr_restrict) {
-	case 0:
-		if (kallsyms_for_perf())
-			return true;
-		fallthrough;
-	case 1:
-		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
-				     CAP_OPT_NOAUDIT) == 0)
-			return true;
-		fallthrough;
-	default:
-		return false;
-	}
+DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym)
+
+static const struct bpf_iter_seq_info ksym_iter_seq_info = {
+	.seq_ops = &bpf_iter_ksym_ops,
+	.init_seq_private = bpf_iter_ksym_init,
+	.fini_seq_private = NULL,
+	.seq_priv_size = sizeof(struct kallsym_iter),
+};
+
+static struct bpf_iter_reg ksym_iter_reg_info = {
+	.target = "ksym",
+	.feature = BPF_ITER_RESCHED,
+	.ctx_arg_info_size = 1,
+	.ctx_arg_info = {
+		{ offsetof(struct bpf_iter__ksym, ksym),
+		  PTR_TO_BTF_ID_OR_NULL },
+	},
+	.seq_info = &ksym_iter_seq_info,
+};
+
+BTF_ID_LIST(btf_ksym_iter_id)
+BTF_ID(struct, kallsym_iter)
+
+static int __init bpf_ksym_iter_register(void)
+{
+	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
+	return bpf_iter_reg_target(&ksym_iter_reg_info);
 }
+late_initcall(bpf_ksym_iter_register);
+
+#endif /* CONFIG_BPF_SYSCALL */
+
 static int kallsyms_open(struct inode *inode, struct file *file)
 {
 	/*
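
Note on the length prefix handled in kallsyms_expand_symbol() and get_symbol_offset() above: each kallsyms_names entry begins with a length byte, and when the most significant bit of that byte is set the length continues into a second byte (low 7 bits first, next byte shifted left by 7), which is what allows symbol names longer than 127 bytes. A minimal stand-alone sketch of that decoding (not part of the commit; the helper and buffer names are made up for illustration):

#include <stdio.h>

/*
 * Illustrative only: decode the one- or two-byte length prefix used by
 * kallsyms_names entries after the "big symbol" change above.  'entry'
 * points at the first byte of an entry; the number of prefix bytes
 * consumed is returned through *prefix_bytes.
 */
static unsigned int entry_data_len(const unsigned char *entry,
                                   unsigned int *prefix_bytes)
{
        unsigned int len = entry[0];

        if (len & 0x80) {
                /* MSB set: low 7 bits here, upper bits in the next byte. */
                len = (len & 0x7F) | ((unsigned int)entry[1] << 7);
                *prefix_bytes = 2;
        } else {
                *prefix_bytes = 1;
        }
        return len;
}

int main(void)
{
        /* 0x85, 0x01 encodes a length of (0x05 | 1 << 7) = 133 bytes. */
        const unsigned char sample[] = { 0x85, 0x01 };
        unsigned int prefix_bytes;
        unsigned int len = entry_data_len(sample, &prefix_bytes);

        printf("len=%u prefix=%u\n", len, prefix_bytes);  /* len=133 prefix=2 */
        return 0;
}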
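The new name-based entry points above (kallsyms_lookup_name(), which now does a binary search over kallsyms_seqs_of_names, and kallsyms_on_each_match_symbol(), which walks every vmlinux symbol carrying a given name) would be used from built-in kernel code roughly as in the sketch below. This is illustrative only: the callback and wrapper are hypothetical, and "schedule" is just an example symbol name.

#include <linux/kallsyms.h>
#include <linux/printk.h>

/* Hypothetical callback: invoked once per matching vmlinux symbol. */
static int note_match(void *data, unsigned long addr)
{
	unsigned int *count = data;

	(*count)++;
	pr_info("match #%u at %px\n", *count, (void *)addr);
	return 0;		/* returning non-zero stops the walk */
}

/* Hypothetical caller, built into the kernel (not a module). */
static void kallsyms_demo(void)
{
	unsigned int count = 0;
	unsigned long addr;

	/* One result: the address of a symbol with this exact name. */
	addr = kallsyms_lookup_name("schedule");

	/* All results: every vmlinux symbol sharing the name. */
	kallsyms_on_each_match_symbol(note_match, "schedule", &count);

	pr_info("schedule at %px, %u symbol(s) named \"schedule\"\n",
		(void *)addr, count);
}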
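sprint_symbol_build_id() and sprint_backtrace_build_id(), added above, format an address as "name+offset/size [module buildid]", with the build ID part emitted only when CONFIG_STACKTRACE_BUILD_ID is enabled. A sketch of a hypothetical in-kernel caller, assuming only the declarations this diff adds:

#include <linux/kallsyms.h>
#include <linux/printk.h>

/* Hypothetical helper: report an address with its module build ID, if any. */
static void report_address(unsigned long ip)
{
	char sym[KSYM_SYMBOL_LEN];

	/* Plain lookup of 'ip' itself. */
	sprint_symbol_build_id(sym, ip);
	pr_info("at %s\n", sym);

	/*
	 * For a saved return address in a backtrace, decrease the address
	 * first so a tail call to a noreturn function still resolves to
	 * the caller.
	 */
	sprint_backtrace_build_id(sym, ip);
	pr_info("called from %s\n", sym);
}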
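The #ifdef CONFIG_BPF_SYSCALL block above registers a "ksym" BPF iterator target whose context exposes the kallsym_iter at the current position. A BPF program attached to it might look like the sketch below; this assumes a vmlinux.h generated from a kernel that carries this iterator, the libbpf BPF_SEQ_PRINTF convenience macro, and the usual SEC("iter/ksym") naming, none of which are part of the diff itself.

/* Sketch of the BPF side of a ksym dumper (compiled with clang -target bpf). */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/ksym")
int dump_ksym(struct bpf_iter__ksym *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct kallsym_iter *iter = ctx->ksym;

	/* ksym is NULL for the final (stop) invocation. */
	if (!iter)
		return 0;

	/*
	 * show_value was cached from the opener's credentials in
	 * bpf_iter_ksym_init(); mask the address when it is clear.
	 */
	BPF_SEQ_PRINTF(seq, "0x%llx %s\n",
		       iter->show_value ? iter->value : 0, iter->name);
	return 0;
}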