Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c    | 223
-rw-r--r--  kernel/bpf/syscall.c |   2
-rw-r--r--  kernel/extable.c     |   9
-rw-r--r--  kernel/kallsyms.c    |  61
4 files changed, 282 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2831ba1e71c1..f45827e205d3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -28,6 +28,9 @@
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
+#include <linux/rbtree_latch.h>
+#include <linux/kallsyms.h>
+#include <linux/rcupdate.h>
#include <asm/unaligned.h>
@@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
fp->aux = aux;
fp->aux->prog = fp;
+ INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+
return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
@@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}
#ifdef CONFIG_BPF_JIT
+static __always_inline void
+bpf_get_prog_addr_region(const struct bpf_prog *prog,
+ unsigned long *symbol_start,
+ unsigned long *symbol_end)
+{
+ const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+ unsigned long addr = (unsigned long)hdr;
+
+ WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
+
+ *symbol_start = addr;
+ *symbol_end = addr + hdr->pages * PAGE_SIZE;
+}
+
+static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+{
+ BUILD_BUG_ON(sizeof("bpf_prog_") +
+ sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
+
+ sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
+ sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
+ *sym = 0;
+}
+
+static __always_inline unsigned long
+bpf_get_prog_addr_start(struct latch_tree_node *n)
+{
+ unsigned long symbol_start, symbol_end;
+ const struct bpf_prog_aux *aux;
+
+ aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+ bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+ return symbol_start;
+}
+
+static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
+ struct latch_tree_node *b)
+{
+ return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+}
+
+static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+{
+ unsigned long val = (unsigned long)key;
+ unsigned long symbol_start, symbol_end;
+ const struct bpf_prog_aux *aux;
+
+ aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+ bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+ if (val < symbol_start)
+ return -1;
+ if (val >= symbol_end)
+ return 1;
+
+ return 0;
+}
+
+static const struct latch_tree_ops bpf_tree_ops = {
+ .less = bpf_tree_less,
+ .comp = bpf_tree_comp,
+};
+
+static DEFINE_SPINLOCK(bpf_lock);
+static LIST_HEAD(bpf_kallsyms);
+static struct latch_tree_root bpf_tree __cacheline_aligned;
+
+int bpf_jit_kallsyms __read_mostly;
+
+static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+{
+ WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
+ list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
+ latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+}
+
+static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+{
+ if (list_empty(&aux->ksym_lnode))
+ return;
+
+ latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+ list_del_rcu(&aux->ksym_lnode);
+}
+
+static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
+{
+ return fp->jited && !bpf_prog_was_classic(fp);
+}
+
+static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym_lnode) ||
+ fp->aux->ksym_lnode.prev == LIST_POISON2;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+ unsigned long flags;
+
+ if (!bpf_prog_kallsyms_candidate(fp) ||
+ !capable(CAP_SYS_ADMIN))
+ return;
+
+ spin_lock_irqsave(&bpf_lock, flags);
+ bpf_prog_ksym_node_add(fp->aux);
+ spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+ unsigned long flags;
+
+ if (!bpf_prog_kallsyms_candidate(fp))
+ return;
+
+ spin_lock_irqsave(&bpf_lock, flags);
+ bpf_prog_ksym_node_del(fp->aux);
+ spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
+{
+ struct latch_tree_node *n;
+
+ if (!bpf_jit_kallsyms_enabled())
+ return NULL;
+
+ n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+ return n ?
+ container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+ NULL;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char *sym)
+{
+ unsigned long symbol_start, symbol_end;
+ struct bpf_prog *prog;
+ char *ret = NULL;
+
+ rcu_read_lock();
+ prog = bpf_prog_kallsyms_find(addr);
+ if (prog) {
+ bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
+ bpf_get_prog_name(prog, sym);
+
+ ret = sym;
+ if (size)
+ *size = symbol_end - symbol_start;
+ if (off)
+ *off = addr - symbol_start;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+bool is_bpf_text_address(unsigned long addr)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = bpf_prog_kallsyms_find(addr) != NULL;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym)
+{
+ unsigned long symbol_start, symbol_end;
+ struct bpf_prog_aux *aux;
+ unsigned int it = 0;
+ int ret = -ERANGE;
+
+ if (!bpf_jit_kallsyms_enabled())
+ return ret;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+ if (it++ != symnum)
+ continue;
+
+ bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+ bpf_get_prog_name(aux->prog, sym);
+
+ *value = symbol_start;
+ *type = BPF_SYM_ELF_TYPE;
+
+ ret = 0;
+ break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
module_memfree(hdr);
}
+/* This symbol is only overridden by archs that have different
+ * requirements than the usual eBPF JITs, f.e. when they only
+ * implement cBPF JIT, do not set images read-only, etc.
+ */
+void __weak bpf_jit_free(struct bpf_prog *fp)
+{
+ if (fp->jited) {
+ struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+ bpf_jit_binary_unlock_ro(hdr);
+ bpf_jit_binary_free(hdr);
+
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+ }
+
+ bpf_prog_unlock_free(fp);
+}
+
int bpf_jit_harden __read_mostly;
static int bpf_jit_blind_insn(const struct bpf_insn *from,
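
For illustration only (not part of the patch): bpf_get_prog_name() above builds the symbol as "bpf_prog_" followed by the 8-byte program tag in hex. A minimal standalone userspace sketch of the same formatting, using a made-up tag value:

#include <stdio.h>

/* Hypothetical 8-byte tag value, standing in for prog->tag. */
static const unsigned char tag[8] = {
	0x88, 0x15, 0xa7, 0xb5, 0xd9, 0xc5, 0x5f, 0xc9
};

int main(void)
{
	char sym[64];
	int n = snprintf(sym, sizeof(sym), "bpf_prog_");

	/* bin2hex() equivalent: append each tag byte as two hex digits. */
	for (int i = 0; i < (int)sizeof(tag); i++)
		n += snprintf(sym + n, sizeof(sym) - n, "%02x", tag[i]);

	printf("%s\n", sym);	/* prints: bpf_prog_8815a7b5d9c55fc9 */
	return 0;
}

With the JIT active and the bpf_jit_kallsyms knob declared above enabled (it is exposed as a sysctl elsewhere in this series, outside the kernel/ diffstat), names of this form appear in /proc/kallsyms with type BPF_SYM_ELF_TYPE.
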
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f74ca17af64a..461eb1e66a0f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -707,6 +707,7 @@ void bpf_prog_put(struct bpf_prog *prog)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
trace_bpf_prog_put_rcu(prog);
+ bpf_prog_kallsyms_del(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
}
@@ -903,6 +904,7 @@ static int bpf_prog_load(union bpf_attr *attr)
/* failed to allocate fd */
goto free_used_maps;
+ bpf_prog_kallsyms_add(prog);
trace_bpf_prog_load(prog, err);
return err;
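
Usage sketch (not part of the patch; constants and fields are from the bpf(2) UAPI, and the observable effect assumes a privileged caller with the JIT and bpf_jit_kallsyms enabled): with bpf_prog_kallsyms_add() hooked into program load as above, a hypothetical loader like the following keeps an eBPF program alive so its bpf_prog_<tag> symbol can be inspected in /proc/kallsyms; bpf_prog_kallsyms_del() removes the entry again once the last reference is dropped.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	/* Trivial eBPF program: r0 = 0; exit. */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insn_cnt  = 2;
	attr.insns     = (unsigned long)insns;
	attr.license   = (unsigned long)"GPL";

	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0) {
		perror("BPF_PROG_LOAD");
		return 1;
	}

	/* While this process holds the fd, grep /proc/kallsyms for bpf_prog_. */
	printf("loaded, fd=%d; press Ctrl-C when done\n", fd);
	pause();
	return 0;
}
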
diff --git a/kernel/extable.c b/kernel/extable.c
index e3beec4a2339..bd82117ad424 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
+#include <linux/filter.h>
#include <asm/sections.h>
#include <linux/uaccess.h>
@@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr)
return 1;
if (is_ftrace_trampoline(addr))
return 1;
+ if (is_bpf_text_address(addr))
+ return 1;
/*
* There might be init symbols in saved stacktraces.
* Give those symbols a chance to be printed in
@@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr)
return 1;
if (is_module_text_address(addr))
return 1;
- return is_ftrace_trampoline(addr);
+ if (is_ftrace_trampoline(addr))
+ return 1;
+ if (is_bpf_text_address(addr))
+ return 1;
+ return 0;
}
/*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fafd1a3ef0da..6a3b249a2ae1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/filter.h>
#include <linux/compiler.h>
#include <asm/sections.h>
@@ -300,10 +301,11 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
unsigned long *offset)
{
char namebuf[KSYM_NAME_LEN];
+
if (is_ksym_addr(addr))
return !!get_symbol_pos(addr, symbolsize, offset);
-
- return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf);
+ return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+ !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
/*
@@ -318,6 +320,8 @@ const char *kallsyms_lookup(unsigned long addr,
unsigned long *offset,
char **modname, char *namebuf)
{
+ const char *ret;
+
namebuf[KSYM_NAME_LEN - 1] = 0;
namebuf[0] = 0;
@@ -333,9 +337,13 @@ const char *kallsyms_lookup(unsigned long addr,
return namebuf;
}
- /* See if it's in a module. */
- return module_address_lookup(addr, symbolsize, offset, modname,
- namebuf);
+ /* See if it's in a module or a BPF JITed image. */
+ ret = module_address_lookup(addr, symbolsize, offset,
+ modname, namebuf);
+ if (!ret)
+ ret = bpf_address_lookup(addr, symbolsize,
+ offset, modname, namebuf);
+ return ret;
}
int lookup_symbol_name(unsigned long addr, char *symname)
@@ -471,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
loff_t pos;
+ loff_t pos_mod_end;
unsigned long value;
unsigned int nameoff; /* If iterating in core kernel symbols. */
char type;
@@ -481,13 +490,27 @@ struct kallsym_iter {
static int get_ksymbol_mod(struct kallsym_iter *iter)
{
- if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value,
- &iter->type, iter->name, iter->module_name,
- &iter->exported) < 0)
+ int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
+ &iter->value, &iter->type,
+ iter->name, iter->module_name,
+ &iter->exported);
+ if (ret < 0) {
+ iter->pos_mod_end = iter->pos;
return 0;
+ }
+
return 1;
}
+static int get_ksymbol_bpf(struct kallsym_iter *iter)
+{
+ iter->module_name[0] = '\0';
+ iter->exported = 0;
+ return bpf_get_kallsym(iter->pos - iter->pos_mod_end,
+ &iter->value, &iter->type,
+ iter->name) < 0 ? 0 : 1;
+}
+
/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
@@ -508,16 +531,30 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
iter->name[0] = '\0';
iter->nameoff = get_symbol_offset(new_pos);
iter->pos = new_pos;
+ if (new_pos == 0)
+ iter->pos_mod_end = 0;
+}
+
+static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
+{
+ iter->pos = pos;
+
+ if (iter->pos_mod_end > 0 &&
+ iter->pos_mod_end < iter->pos)
+ return get_ksymbol_bpf(iter);
+
+ if (!get_ksymbol_mod(iter))
+ return get_ksymbol_bpf(iter);
+
+ return 1;
}
/* Returns false if pos at or past end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
/* Module symbols can be accessed randomly. */
- if (pos >= kallsyms_num_syms) {
- iter->pos = pos;
- return get_ksymbol_mod(iter);
- }
+ if (pos >= kallsyms_num_syms)
+ return update_iter_mod(iter, pos);
/* If we're not on the desired position, reset to new position. */
if (pos != iter->pos)