Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h | 174
1 file changed, 162 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f1af7d63d678..c397934f91dd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
+#include <linux/wait.h>
struct perf_event;
struct bpf_prog;
@@ -56,6 +57,10 @@ struct bpf_map {
struct work_struct work;
atomic_t usercnt;
struct bpf_map *inner_map_meta;
+ char name[BPF_OBJ_NAME_LEN];
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
};
/* function argument constraints */
@@ -137,6 +142,7 @@ enum bpf_reg_type {
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
PTR_TO_STACK, /* reg == frame_pointer + offset */
+ PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
};
@@ -155,6 +161,11 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+struct bpf_prog_ops {
+ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+};
+
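For context on the split: a program type's runtime hooks now live in this much smaller structure, while the verification hooks stay in bpf_verifier_ops below. A minimal sketch of a per-type definition, assuming an XDP instance named xdp_prog_ops wired to the bpf_prog_test_run_xdp() helper declared later in this header (the name and wiring are illustrative, not part of this hunk):

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run	= bpf_prog_test_run_xdp,
};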
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -170,8 +181,16 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
- int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
+};
+
+struct bpf_dev_offload {
+ struct bpf_prog *prog;
+ struct net_device *netdev;
+ void *dev_priv;
+ struct list_head offloads;
+ bool dev_state;
+ bool verifier_running;
+ wait_queue_head_t verifier_done;
};
struct bpf_prog_aux {
@@ -182,10 +201,16 @@ struct bpf_prog_aux {
u32 id;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
- const struct bpf_verifier_ops *ops;
+ const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
struct user_struct *user;
+ u64 load_time; /* ns since boottime */
+ char name[BPF_OBJ_NAME_LEN];
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+ struct bpf_dev_offload *offload;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -218,9 +243,6 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
@@ -237,19 +259,84 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+/* an array of programs to be executed under rcu_lock.
+ *
+ * Typical usage:
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ *
+ * the structure returned by bpf_prog_array_alloc() should be populated
+ * with program pointers and the last pointer must be NULL.
+ * The user has to keep refcnt on the program and make sure the program
+ * is removed from the array before bpf_prog_put().
+ * The 'struct bpf_prog_array *' should only be replaced with xchg()
+ * since other cpus are walking the array of pointers in parallel.
+ */
+struct bpf_prog_array {
+ struct rcu_head rcu;
+ struct bpf_prog *progs[0];
+};
+
+struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ __u32 __user *prog_ids, u32 cnt);
+
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+ struct bpf_prog *old_prog);
+int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ struct bpf_prog *exclude_prog,
+ struct bpf_prog *include_prog,
+ struct bpf_prog_array **new_array);
+
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+ ({ \
+ struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array *_array; \
+ u32 _ret = 1; \
+ rcu_read_lock(); \
+ _array = rcu_dereference(array); \
+ if (unlikely(check_non_null && !_array))\
+ goto _out; \
+ _prog = _array->progs; \
+ while ((__prog = READ_ONCE(*_prog))) { \
+ _ret &= func(__prog, ctx); \
+ _prog++; \
+ } \
+_out: \
+ rcu_read_unlock(); \
+ _ret; \
+ })
+
+#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+
+#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+
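Tying the pieces above together, here is a minimal, illustrative caller. The run_array variable and attach_prog() helper are hypothetical, and BPF_PROG_RUN is assumed to come from <linux/filter.h>; only the bpf_prog_array API and the replace-with-xchg() rule are taken from the comment above.

/* Hypothetical attach point holding an RCU-managed program array. */
static struct bpf_prog_array __rcu *run_array;

static u32 run_attached_progs(void *ctx)
{
	/* 1 only if every attached program returned 1 (or none attached). */
	return BPF_PROG_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
}

static int attach_prog(struct bpf_prog *prog)
{
	struct bpf_prog_array *new_array;
	struct bpf_prog_array __rcu *old_array;
	int err;

	/* Build a copy that additionally contains 'prog'. */
	err = bpf_prog_array_copy(run_array, NULL, prog, &new_array);
	if (err)
		return err;

	/* Publish with xchg() as the comment above requires; readers hold
	 * rcu_read_lock(), so the old copy is handed to bpf_prog_array_free().
	 */
	old_array = xchg(&run_array, new_array);
	bpf_prog_array_free(old_array);
	return 0;
}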
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
-#define BPF_PROG_TYPE(_id, _ops) \
- extern const struct bpf_verifier_ops _ops;
+extern const struct file_operations bpf_map_fops;
+extern const struct file_operations bpf_prog_fops;
+
+#define BPF_PROG_TYPE(_id, _name) \
+ extern const struct bpf_prog_ops _name ## _prog_ops; \
+ extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
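To make the reworked X-macro concrete: assuming bpf_types.h carries an entry such as BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) (the exact table is outside this hunk), the include above now emits a pair of externs per program type instead of a single verifier_ops symbol:

/* Expansion of BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) under the new macro: */
extern const struct bpf_prog_ops xdp_prog_ops;
extern const struct bpf_verifier_ops xdp_verifier_ops;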
+extern const struct bpf_prog_ops bpf_offload_prog_ops;
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
+
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
+struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
+ struct net_device *netdev);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@@ -269,11 +356,11 @@ void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
-int bpf_map_new_fd(struct bpf_map *map);
+int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname);
+int bpf_obj_get_user(const char __user *pathname, int flags);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
@@ -292,6 +379,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
+int bpf_get_file_flag(int flags);
+
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
* Best-effort only. No barriers here, since it _will_ race with concurrent
@@ -316,6 +405,13 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
+struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
+void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
+void __cpu_map_flush(struct bpf_map *map);
+struct xdp_buff;
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+ struct net_device *dev_rx);
+
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
@@ -323,7 +419,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
attr->numa_node : NUMA_NO_NODE;
}
-#else
+#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -334,6 +430,14 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
+ enum bpf_prog_type type,
+ struct net_device *netdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
int i)
{
@@ -368,7 +472,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
-static inline int bpf_obj_get_user(const char __user *pathname)
+static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
return -EOPNOTSUPP;
}
@@ -386,8 +490,54 @@ static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
static inline void __dev_map_flush(struct bpf_map *map)
{
}
+
+static inline
+struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+ return NULL;
+}
+
+static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index)
+{
+}
+
+static inline void __cpu_map_flush(struct bpf_map *map)
+{
+}
+
+struct xdp_buff;
+static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
+ struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
#endif /* CONFIG_BPF_SYSCALL */
+int bpf_prog_offload_compile(struct bpf_prog *prog);
+void bpf_prog_offload_destroy(struct bpf_prog *prog);
+u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
+
+#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
+int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+
+static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+{
+ return aux->offload;
+}
+#else
+static inline int bpf_prog_offload_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
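A sketch of how a driver-facing caller might combine the new device-bound helpers; the example_xdp_setup() function, its arguments, and the surrounding logic are hypothetical, only bpf_prog_get_type_dev(), bpf_prog_is_dev_bound() and bpf_prog_put() are real entry points from this header.

static int example_xdp_setup(struct net_device *dev, u32 prog_fd)
{
	struct bpf_prog *prog;

	/* Acquire a reference; the netdev argument lets device-bound
	 * programs be matched against this particular device.
	 */
	prog = bpf_prog_get_type_dev(prog_fd, BPF_PROG_TYPE_XDP, dev);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_is_dev_bound(prog->aux)) {
		/* Offload path: hand the program to the device instead of
		 * running it on the host.
		 */
	}

	/* ... install the program; on failure: bpf_prog_put(prog); ... */
	return 0;
}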
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);