path: root/tools/include
author    David S. Miller <davem@davemloft.net>  2019-04-28 08:42:41 -0400
committer David S. Miller <davem@davemloft.net>  2019-04-28 08:42:41 -0400
commit    5f0d736e7f7db586141f974821b6ca6c1d906d5b (patch)
tree      8f62c1a24fd499a55deb455c6dd42bb51cd53275 /tools/include
parent    b1a79360ee862f8ada4798ad2346fa45bb41b527 (diff)
parent    9076c49bdca2aa68c805f2677b2bccc4bde2bac1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-04-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Introduce BPF socket local storage map so that BPF programs can store
   private data they associate with a socket (instead of e.g. separate hash
   table), from Martin.

2) Add support for bpftool to dump BTF types. This is done through a new
   `bpftool btf dump` sub-command, from Andrii.

3) Enable BPF-based flow dissector for skb-less eth_get_headlen() calls which
   was currently not supported since skb was used to lookup netns, from
   Stanislav.

4) Add an opt-in interface for tracepoints to expose a writable context for
   attached BPF programs, used here for NBD sockets, from Matt.

5) BPF xadd related arm64 JIT fixes and scalability improvements, from Daniel.

6) Change the skb->protocol for bpf_skb_adjust_room() helper in order to
   support tunnels such as sit. Add selftests as well, from Willem.

7) Various smaller misc fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
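Of the changes above, item 2) adds a bpftool sub-command that takes a BTF source; invocations of the form `bpftool btf dump id <BTF_ID>` (for a BTF object already loaded into the kernel) or `bpftool btf dump file <object.o>` (for the .BTF section of an ELF file) print the contained BTF types. The ID and path shown here are placeholders, not values from this series. Items 1) and 4) are illustrated with short, hedged program sketches further down, next to the corresponding uapi additions.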
Diffstat (limited to 'tools/include')
-rw-r--r--  tools/include/uapi/linux/bpf.h | 54
1 file changed, 52 insertions(+), 2 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 704bb69514a2..72336bac7573 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -133,6 +133,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_SK_STORAGE,
};
/* Note that tracing related programs such as
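The map type added above is the socket local storage from item 1) of the pull request. A minimal sketch of how such a map might be declared in a BPF C object follows; the BTF-defined map syntax assumes a libbpf recent enough to support SEC(".maps"), and the map name and value struct are made up for illustration. SK_STORAGE maps need BTF for their key/value types and the BPF_F_NO_PREALLOC flag; seen from user space, the key is a socket file descriptor.

#include <linux/bpf.h>
#include <linux/types.h>
#include "bpf_helpers.h"	/* helper/macro header; its location varies by tree */

struct sk_stats {			/* illustrative per-socket value type */
	__u64 rx_pkts;
	__u64 tx_pkts;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for SK_STORAGE maps */
	__type(key, int);			/* socket fd when accessed from user space */
	__type(value, struct sk_stats);
} sk_stats_map SEC(".maps");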
@@ -168,6 +169,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_REUSEPORT,
BPF_PROG_TYPE_FLOW_DISSECTOR,
BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
};
enum bpf_attach_type {
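The new program type corresponds to item 4) above: tracepoints can opt in to exposing a writable context to attached BPF programs. A very rough sketch of such a program follows; the "raw_tracepoint.w/" section prefix, the tracepoint name, and the buffer layout are all assumptions for illustration, not details taken from this diff (the writable buffer size is fixed by the tracepoint's declaration and checked when the program is attached).

#include <linux/bpf.h>
#include <linux/types.h>
#include "bpf_helpers.h"

struct writable_buf {		/* hypothetical; must match what the tracepoint declares writable */
	__u32 flags;
};

SEC("raw_tracepoint.w/my_writable_tp")	/* hypothetical writable tracepoint */
int poke_writable_buf(struct bpf_raw_tracepoint_args *ctx)
{
	/* For writable raw tracepoints, the first context argument is the
	 * buffer the tracepoint exposes for modification. */
	struct writable_buf *buf = (void *)ctx->args[0];

	buf->flags |= 0x1;	/* direct write into the tracepoint buffer */
	return 0;
}

char _license[] SEC("license") = "GPL";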
@@ -1737,12 +1739,19 @@ union bpf_attr {
* error if an eBPF program tries to set a callback that is not
* supported in the current kernel.
*
- * The supported callback values that *argval* can combine are:
+ * *argval* is a flag array which can combine these flags:
*
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
*
+ * Therefore, this function can be used to clear a callback flag by
+ * setting the appropriate bit to zero. e.g. to disable the RTO
+ * callback:
+ *
+ * **bpf_sock_ops_cb_flags_set(bpf_sock,**
+ * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
+ *
* Here are some examples of where one could call such eBPF
* program:
*
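As a small illustration of the set/clear pattern documented above (the program below and its flag combination are illustrative, not part of this patch), a sock_ops program might enable the retransmission and TCP state-change callbacks once a connection is established, OR-ing the new flags into the ones already set:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sockops")
int enable_cb_flags(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* Preserve whatever callback flags are already enabled and
		 * additionally request retransmission and state callbacks. */
		bpf_sock_ops_cb_flags_set(skops,
					  skops->bpf_sock_ops_cb_flags |
					  BPF_SOCK_OPS_RETRANS_CB_FLAG |
					  BPF_SOCK_OPS_STATE_CB_FLAG);
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";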
@@ -2622,6 +2631,42 @@ union bpf_attr {
* was provided.
*
* **-ERANGE** if resulting value was out of range.
+ *
+ * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * Description
+ * Get a bpf-local-storage from a sk.
+ *
+ * Logically, it can be thought of as getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem(map, &sk)** except that this
+ * helper enforces that the key must be a **bpf_fullsock()**
+ * and that the map must be of type **BPF_MAP_TYPE_SK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * in the map. The *map* is only used as the bpf-local-storage
+ * **type**; that **type** (i.e. the *map*) is searched
+ * against all bpf-local-storages residing at *sk*.
+ *
+ * An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be
+ * used such that a new bpf-local-storage will be
+ * created if one does not exist. *value* can be used
+ * together with BPF_SK_STORAGE_GET_F_CREATE to specify
+ * the initial value of a bpf-local-storage. If *value* is
+ * NULL, the new bpf-local-storage will be zero initialized.
+ * Return
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf-local-storage.
+ *
+ * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * Description
+ * Delete a bpf-local-storage from a sk.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
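Tying the two helpers documented above together, here is a hedged sketch of a program using them. It assumes the sk_stats_map / struct sk_stats declaration from the earlier BPF_MAP_TYPE_SK_STORAGE sketch, and the cgroup skb attach point is likewise an assumption (which program types may call these helpers depends on how the kernel wires up the helper protos); field and program names are illustrative.

#include <linux/bpf.h>
#include <linux/types.h>
#include "bpf_helpers.h"

/* Assumes sk_stats_map and struct sk_stats from the earlier
 * BPF_MAP_TYPE_SK_STORAGE sketch are defined in the same object. */

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct sk_stats *stats;

	if (!sk)
		return 1;

	sk = bpf_sk_fullsock(sk);	/* the helpers require a full socket */
	if (!sk)
		return 1;

	/* Look up this socket's private storage, creating it on first use;
	 * with a NULL init value the new storage starts out zeroed. */
	stats = bpf_sk_storage_get(&sk_stats_map, sk, NULL,
				   BPF_SK_STORAGE_GET_F_CREATE);
	if (!stats)
		return 1;

	__sync_fetch_and_add(&stats->tx_pkts, 1);

	return 1;			/* allow the packet */
}

char _license[] SEC("license") = "GPL";

From user space, the same per-socket value can be read or deleted with the regular bpf_map_lookup_elem()/bpf_map_delete_elem() syscall wrappers, using the socket's file descriptor as the key.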
@@ -2730,7 +2775,9 @@ union bpf_attr {
FN(sysctl_get_new_value), \
FN(sysctl_set_new_value), \
FN(strtol), \
- FN(strtoul),
+ FN(strtoul), \
+ FN(sk_storage_get), \
+ FN(sk_storage_delete),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2806,6 +2853,9 @@ enum bpf_func_id {
/* BPF_FUNC_sysctl_get_name flags. */
#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+/* BPF_FUNC_sk_storage_get flags */
+#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,