Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--    kernel/bpf/core.c    129
1 file changed, 119 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ef0e1e3e66f4..49e32acad7d8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -30,7 +30,8 @@
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
-
+#include <linux/extable.h>
+#include <linux/log2.h>
#include <asm/unaligned.h>
/* Registers */
@@ -255,6 +256,7 @@ void __bpf_prog_free(struct bpf_prog *fp)
{
if (fp->aux) {
free_percpu(fp->aux->stats);
+ kfree(fp->aux->poke_tab);
kfree(fp->aux);
}
vfree(fp);
@@ -668,9 +670,6 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
struct latch_tree_node *n;
- if (!bpf_jit_kallsyms_enabled())
- return NULL;
-
n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
return n ?
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
@@ -712,6 +711,24 @@ bool is_bpf_text_address(unsigned long addr)
return ret;
}
+const struct exception_table_entry *search_bpf_extables(unsigned long addr)
+{
+        const struct exception_table_entry *e = NULL;
+        struct bpf_prog *prog;
+
+        rcu_read_lock();
+        prog = bpf_prog_kallsyms_find(addr);
+        if (!prog)
+                goto out;
+        if (!prog->aux->num_exentries)
+                goto out;
+
+        e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
+out:
+        rcu_read_unlock();
+        return e;
+}
+
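For orientation, a hedged sketch of the intended consumer: an architecture's page-fault fixup path looking up the faulting instruction pointer in a JITed program's exception table. bpf_fixup_fault() is a made-up name and the regs->ip / relative-fixup handling assumes x86 conventions; only search_bpf_extables() and is_bpf_text_address() come from this file.

/* Hypothetical sketch, not part of this patch: how an arch fault-fixup
 * path could consult the BPF exception tables for a fault that happened
 * inside JITed BPF code. Assumes x86-style pt_regs and relative entries.
 */
static bool bpf_fixup_fault(struct pt_regs *regs)
{
        const struct exception_table_entry *e;

        /* Only JITed program images carry prog->aux->extable entries. */
        if (!is_bpf_text_address(regs->ip))
                return false;

        e = search_bpf_extables(regs->ip);
        if (!e)
                return false;

        /* Arch-specific: rewrite regs->ip to the fixup target recorded
         * in 'e' so execution can continue past the faulting load.
         */
        regs->ip = (unsigned long)&e->fixup + e->fixup;
        return true;
}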
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
@@ -740,6 +757,39 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return ret;
}
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+                                struct bpf_jit_poke_descriptor *poke)
+{
+        struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
+        static const u32 poke_tab_max = 1024;
+        u32 slot = prog->aux->size_poke_tab;
+        u32 size = slot + 1;
+
+        if (size > poke_tab_max)
+                return -ENOSPC;
+        if (poke->ip || poke->ip_stable || poke->adj_off)
+                return -EINVAL;
+
+        switch (poke->reason) {
+        case BPF_POKE_REASON_TAIL_CALL:
+                if (!poke->tail_call.map)
+                        return -EINVAL;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
+        if (!tab)
+                return -ENOMEM;
+
+        memcpy(&tab[slot], poke, sizeof(*poke));
+        prog->aux->size_poke_tab = size;
+        prog->aux->poke_tab = tab;
+
+        return slot;
+}
+
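A rough sketch of how an arch JIT might register a tail-call poke target through this helper while emitting code. The tail_call.key field and the jit_track_tail_call() wrapper are assumptions for illustration; only the reason/tail_call.map/ip/ip_stable/adj_off checks come from the function above, and the real per-arch usage lives in the JIT, not in this file.

/* Illustrative only. Assumes @map is the prog array referenced by the
 * tail call and @key its constant index, both known from verifier state.
 */
static int jit_track_tail_call(struct bpf_prog *prog, struct bpf_map *map,
                               u32 key)
{
        struct bpf_jit_poke_descriptor poke = {};
        int slot;

        poke.reason = BPF_POKE_REASON_TAIL_CALL;
        poke.tail_call.map = map;       /* required by the checks above */
        poke.tail_call.key = key;       /* assumed field, for illustration */

        /* ip/ip_stable/adj_off must still be zero at registration time;
         * the JIT records the real patch address later and only then
         * marks it stable.
         */
        slot = bpf_jit_add_poke_descriptor(prog, &poke);
        return slot;    /* >= 0: descriptor index for this call site */
}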
static atomic_long_t bpf_jit_current;
/* Can be overridden by an arch's JIT compiler if it has a custom,
@@ -800,6 +850,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
struct bpf_binary_header *hdr;
u32 size, hole, start, pages;
+        WARN_ON_ONCE(!is_power_of_2(alignment) ||
+                     alignment > BPF_IMAGE_ALIGNMENT);
+
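To make the new sanity check concrete, a hedged sketch of the kind of call it guards: an arch JIT requesting an image buffer with a power-of-two alignment. alloc_jit_image() and jit_fill_hole() are placeholder names, and BPF_IMAGE_ALIGNMENT is defined elsewhere in this series, not in this hunk; only bpf_jit_binary_alloc() itself is real.

/* Sketch of a caller, not taken from any actual arch JIT. */
static struct bpf_binary_header *alloc_jit_image(unsigned int proglen,
                                                 u8 **image,
                                                 bpf_jit_fill_hole_t jit_fill_hole)
{
        /* Alignment must be a power of two and no larger than
         * BPF_IMAGE_ALIGNMENT, or the WARN_ON_ONCE above fires.
         */
        return bpf_jit_binary_alloc(proglen, image, sizeof(long),
                                    jit_fill_hole);
}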
/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
@@ -1291,6 +1344,12 @@ bool bpf_opcode_in_insntable(u8 code)
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+{
+ memset(dst, 0, size);
+ return -EFAULT;
+}
+
/**
* __bpf_prog_run - run eBPF program on a given context
* @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
@@ -1310,6 +1369,10 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
/* Non-UAPI available opcodes. */
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
+ [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
@@ -1542,6 +1605,16 @@ out:
LDST(W, u32)
LDST(DW, u64)
#undef LDST
+#define LDX_PROBE(SIZEOP, SIZE)                                               \
+        LDX_PROBE_MEM_##SIZEOP:                                               \
+                bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
+                CONT;
+        LDX_PROBE(B, 1)
+        LDX_PROBE(H, 2)
+        LDX_PROBE(W, 4)
+        LDX_PROBE(DW, 8)
+#undef LDX_PROBE
+
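Expanded by hand for the byte case, the macro above amounts to roughly the following interpreter label, shown only to clarify what a BPF_PROBE_MEM load does differently from a plain LDX_MEM load. DST, SRC and CONT are the interpreter's existing shorthands for regs[insn->dst_reg], regs[insn->src_reg] and the dispatch jump.

/* Hand-expanded LDX_PROBE(B, 1), for illustration only: */
LDX_PROBE_MEM_B:
        /* Unlike LDX_MEM_B, the access goes through bpf_probe_read_kernel(),
         * so a bad kernel pointer leaves DST zeroed (see the weak fallback
         * above) instead of dereferencing it directly.
         */
        bpf_probe_read_kernel(&DST, 1, (const void *)(long) (SRC + insn->off));
        CONT;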
STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
atomic_add((u32) SRC, (atomic_t *)(unsigned long)
(DST + insn->off));
@@ -1652,18 +1725,17 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
         if (fp->kprobe_override)
                 return false;
 
-        if (!array->owner_prog_type) {
+        if (!array->aux->type) {
                 /* There's no owner yet where we could check for
                  * compatibility.
                  */
-                array->owner_prog_type = fp->type;
-                array->owner_jited = fp->jited;
-
+                array->aux->type = fp->type;
+                array->aux->jited = fp->jited;
                 return true;
         }
 
-        return array->owner_prog_type == fp->type &&
-               array->owner_jited == fp->jited;
+        return array->aux->type == fp->type &&
+               array->aux->jited == fp->jited;
 }
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -1964,18 +2036,47 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
: 0;
}
+static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
+{
+        enum bpf_cgroup_storage_type stype;
+
+        for_each_cgroup_storage_type(stype) {
+                if (!aux->cgroup_storage[stype])
+                        continue;
+                bpf_cgroup_storage_release(aux->prog,
+                                           aux->cgroup_storage[stype]);
+        }
+}
+
+static void bpf_free_used_maps(struct bpf_prog_aux *aux)
+{
+        struct bpf_map *map;
+        int i;
+
+        bpf_free_cgroup_storage(aux);
+        for (i = 0; i < aux->used_map_cnt; i++) {
+                map = aux->used_maps[i];
+                if (map->ops->map_poke_untrack)
+                        map->ops->map_poke_untrack(map, aux);
+                bpf_map_put(map);
+        }
+        kfree(aux->used_maps);
+}
+
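For orientation, a hedged sketch of the tracking side that bpf_free_used_maps() now undoes: the idea is that a prog array map keeps a list of programs (their bpf_prog_aux) whose tail-call sites may need patching, and map_poke_untrack() drops the dying program from that list before the map reference is put. The list layout and names below are invented for illustration; the real map_poke_track/untrack callbacks live in the array map code, not in this file.

/* Illustrative only -- a made-up tracker, not the kernel's implementation. */
struct poke_tracker {
        struct list_head progs;         /* entries of struct poke_entry */
        struct mutex lock;
};

struct poke_entry {
        struct list_head node;
        struct bpf_prog_aux *aux;       /* program carrying poke descriptors */
};

static void example_map_poke_untrack(struct poke_tracker *t,
                                     struct bpf_prog_aux *aux)
{
        struct poke_entry *e, *tmp;

        mutex_lock(&t->lock);
        list_for_each_entry_safe(e, tmp, &t->progs, node) {
                if (e->aux == aux) {
                        /* Stop patching this program's call sites. */
                        list_del(&e->node);
                        kfree(e);
                        break;
                }
        }
        mutex_unlock(&t->lock);
}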
static void bpf_prog_free_deferred(struct work_struct *work)
{
         struct bpf_prog_aux *aux;
         int i;
 
         aux = container_of(work, struct bpf_prog_aux, work);
+        bpf_free_used_maps(aux);
         if (bpf_prog_is_dev_bound(aux))
                 bpf_prog_offload_destroy(aux->prog);
 #ifdef CONFIG_PERF_EVENTS
         if (aux->prog->has_callchain_buf)
                 put_callchain_buffers();
 #endif
+        bpf_trampoline_put(aux->trampoline);
         for (i = 0; i < aux->func_cnt; i++)
                 bpf_jit_free(aux->func[i]);
         if (aux->func_cnt) {
@@ -1991,6 +2092,8 @@ void bpf_prog_free(struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
+ if (aux->linked_prog)
+ bpf_prog_put(aux->linked_prog);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
schedule_work(&aux->work);
}
@@ -2105,6 +2208,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
return -EFAULT;
}
+int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2)
+{
+ return -ENOTSUPP;
+}
+
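The weak stub above only reports lack of support; to show what callers expect from an arch override, here is a hedged sketch of re-pointing a tail-call site at a new target. repoke_tail_call() is a made-up name, the poke type argument is left abstract because the enum values are defined outside this hunk, and the ip_stable gating is an assumption based on the descriptor fields validated earlier; only the bpf_arch_text_poke() signature and the -ENOTSUPP fallback come from this patch.

/* Illustrative caller, not from this patch. */
static int repoke_tail_call(struct bpf_jit_poke_descriptor *poke,
                            enum bpf_text_poke_type t,
                            void *old_target, void *new_target)
{
        int ret;

        if (!READ_ONCE(poke->ip_stable))
                return -EBUSY;  /* JIT has not finalized the address yet */

        /* Ask the arch to atomically rewrite the instruction at poke->ip
         * from old_target to new_target; -ENOTSUPP means no arch support.
         */
        ret = bpf_arch_text_poke(poke->ip, t, old_target, new_target);
        WARN_ON_ONCE(ret && ret != -ENOTSUPP);
        return ret;
}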
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);