From bae30468dfc1522464f16051addd7bc7da6da75a Mon Sep 17 00:00:00 2001 From: Craig Gallek Date: Mon, 18 Sep 2017 15:30:56 -0400 Subject: bpf: Add uniqueness invariant to trivial lpm test implementation The 'trivial' lpm implementation in this test allows equivalent nodes to be added (that is, nodes consisting of the same prefix and prefix length). For lookup operations, this is fine because insertion happens at the head of the (singly linked) list and the first, best match is returned. In order to support deletion, the tlpm data structure must first enforce uniqueness. This change modifies the insertion algorithm to search for equivalent nodes and remove them. Note: the BPF_MAP_TYPE_LPM_TRIE already has a uniqueness invariant that is implemented as node replacement. Signed-off-by: Craig Gallek Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_lpm_map.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index e97565243d59..a5ed7c4f1819 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -31,6 +31,10 @@ struct tlpm_node { uint8_t key[]; }; +static struct tlpm_node *tlpm_match(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits); + static struct tlpm_node *tlpm_add(struct tlpm_node *list, const uint8_t *key, size_t n_bits) @@ -38,9 +42,17 @@ static struct tlpm_node *tlpm_add(struct tlpm_node *list, struct tlpm_node *node; size_t n; + n = (n_bits + 7) / 8; + + /* 'overwrite' an equivalent entry if one already exists */ + node = tlpm_match(list, key, n_bits); + if (node && node->n_bits == n_bits) { + memcpy(node->key, key, n); + return list; + } + /* add new entry with @key/@n_bits to @list and return new head */ - n = (n_bits + 7) / 8; node = malloc(sizeof(*node) + n); assert(node); -- cgit From e8d1749910b56a7248cb9a5392274348d10108bb Mon Sep 17 00:00:00 2001 From: Craig Gallek Date: Mon, 18 Sep 2017 15:30:57 -0400 Subject: bpf: Test deletion in BPF_MAP_TYPE_LPM_TRIE Extend the 'random' operation tests to include a delete operation (delete half of the nodes from both lpm implementations and ensure that lookups are still equivalent). Also, add a simple IPv4 test which verifies lookup behavior as nodes are deleted from the tree. Signed-off-by: Craig Gallek Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S.
Miller --- tools/testing/selftests/bpf/test_lpm_map.c | 187 ++++++++++++++++++++++++++++- 1 file changed, 183 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index a5ed7c4f1819..6fedb9fec36b 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -104,6 +104,34 @@ static struct tlpm_node *tlpm_match(struct tlpm_node *list, return best; } +static struct tlpm_node *tlpm_delete(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *best = tlpm_match(list, key, n_bits); + struct tlpm_node *node; + + if (!best || best->n_bits != n_bits) + return list; + + if (best == list) { + node = best->next; + free(best); + return node; + } + + for (node = list; node; node = node->next) { + if (node->next == best) { + node->next = best->next; + free(best); + return list; + } + } + /* should never get here */ + assert(0); + return list; +} + static void test_lpm_basic(void) { struct tlpm_node *list = NULL, *t1, *t2; @@ -126,6 +154,13 @@ static void test_lpm_basic(void) assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15)); assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16)); + list = tlpm_delete(list, (uint8_t[]){ 0xff, 0xff }, 16); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + + list = tlpm_delete(list, (uint8_t[]){ 0xff }, 8); + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + tlpm_clear(list); } @@ -170,7 +205,7 @@ static void test_lpm_order(void) static void test_lpm_map(int keysize) { - size_t i, j, n_matches, n_nodes, n_lookups; + size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups; struct tlpm_node *t, *list = NULL; struct bpf_lpm_trie_key *key; uint8_t *data, *value; @@ -182,6 +217,7 @@ static void test_lpm_map(int keysize) */ n_matches = 0; + n_matches_after_delete = 0; n_nodes = 1 << 8; n_lookups = 1 << 16; @@ -235,15 +271,54 @@ static void test_lpm_map(int keysize) } } + /* Remove the first half of the elements in the tlpm and the + * corresponding nodes from the bpf-lpm. Then run the same + * large number of random lookups in both and make sure they match. + * Note: we need to count the number of nodes actually inserted + * since there may have been duplicates. + */ + for (i = 0, t = list; t; i++, t = t->next) + ; + for (j = 0; j < i / 2; ++j) { + key->prefixlen = list->n_bits; + memcpy(key->data, list->key, keysize); + r = bpf_map_delete_elem(map, key); + assert(!r); + list = tlpm_delete(list, list->key, list->n_bits); + assert(list); + } + for (i = 0; i < n_lookups; ++i) { + for (j = 0; j < keysize; ++j) + data[j] = rand() & 0xff; + + t = tlpm_match(list, data, 8 * keysize); + + key->prefixlen = 8 * keysize; + memcpy(key->data, data, keysize); + r = bpf_map_lookup_elem(map, key, value); + assert(!r || errno == ENOENT); + assert(!t == !!r); + + if (t) { + ++n_matches_after_delete; + assert(t->n_bits == value[keysize]); + for (j = 0; j < t->n_bits; ++j) + assert((t->key[j / 8] & (1 << (7 - j % 8))) == + (value[j / 8] & (1 << (7 - j % 8)))); + } + } + close(map); tlpm_clear(list); /* With 255 random nodes in the map, we are pretty likely to match * something on every lookup. 
For statistics, use this: * - * printf(" nodes: %zu\n" - * "lookups: %zu\n" - * "matches: %zu\n", n_nodes, n_lookups, n_matches); + * printf(" nodes: %zu\n" + * " lookups: %zu\n" + * " matches: %zu\n" + * "matches(delete): %zu\n", + * n_nodes, n_lookups, n_matches, n_matches_after_delete); */ } @@ -343,6 +418,108 @@ static void test_lpm_ipaddr(void) close(map_fd_ipv6); } +static void test_lpm_delete(void) +{ + struct bpf_lpm_trie_key *key; + size_t key_size; + int map_fd; + __u64 value; + + key_size = sizeof(*key) + sizeof(__u32); + key = alloca(key_size); + + map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + key_size, sizeof(value), + 100, BPF_F_NO_PREALLOC); + assert(map_fd >= 0); + + /* Add nodes: + * 192.168.0.0/16 (1) + * 192.168.0.0/24 (2) + * 192.168.128.0/24 (3) + * 192.168.1.0/24 (4) + * + * (1) + * / \ + * (IM) (3) + * / \ + * (2) (4) + */ + value = 1; + key->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 2; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 3; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 4; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + /* remove non-existent node */ + key->prefixlen = 32; + inet_pton(AF_INET, "10.0.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && + errno == ENOENT); + + /* assert initial lookup */ + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 2); + + /* remove leaf node */ + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 1); + + /* remove leaf (and intermediary) node */ + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.1.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 1); + + /* remove root node */ + key->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.128.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 3); + + /* remove last node */ + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.128.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && + errno == ENOENT); + + close(map_fd); +} + int main(void) { struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; @@ -365,6 +542,8 @@ int main(void) test_lpm_ipaddr(); + test_lpm_delete(); + printf("test_lpm: OK\n"); return 0; } -- cgit From 69e33b2754ea4fbe4fdd2d18bc8ca05d8670a5c2 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 19 Sep 2017 14:42:17 +0200 Subject: selftests: rtnetlink.sh: add test case for device ifalias Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/rtnetlink.sh | 57 ++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 57b5ff576240..4b48de565cae 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -15,6 +15,14 @@ check_err() fi } +# same but inverted -- used when command must fail for test to pass +check_fail() +{ + if [ $1 -eq 0 ]; then + ret=1 + fi +} + kci_add_dummy() { ip link add name "$devdummy" type dummy @@ -235,6 +243,54 @@ kci_test_addrlabel() echo "PASS: ipv6 addrlabel" } +kci_test_ifalias() +{ + ret=0 + namewant=$(uuidgen) + syspathname="/sys/class/net/$devdummy/ifalias" + + ip link set dev "$devdummy" alias "$namewant" + check_err $? + + if [ $ret -ne 0 ]; then + echo "FAIL: cannot set interface alias of $devdummy to $namewant" + return 1 + fi + + ip link show "$devdummy" | grep -q "alias $namewant" + check_err $? + + if [ -r "$syspathname" ] ; then + read namehave < "$syspathname" + if [ "$namewant" != "$namehave" ]; then + echo "FAIL: did set ifalias $namewant but got $namehave" + return 1 + fi + + namewant=$(uuidgen) + echo "$namewant" > "$syspathname" + ip link show "$devdummy" | grep -q "alias $namewant" + check_err $? + + # sysfs interface allows to delete alias again + echo "" > "$syspathname" + + ip link show "$devdummy" | grep -q "alias $namewant" + check_fail $? + + # re-add the alias -- kernel should free mem when dummy dev is removed + ip link set dev "$devdummy" alias "$namewant" + check_err $? + fi + + if [ $ret -ne 0 ]; then + echo "FAIL: set interface alias $devdummy to $namewant" + return 1 + fi + + echo "PASS: set ifalias $namewant for $devdummy" +} + kci_test_rtnl() { kci_add_dummy @@ -249,6 +305,7 @@ kci_test_rtnl() kci_test_gre kci_test_bridge kci_test_addrlabel + kci_test_ifalias kci_del_dummy } -- cgit From b655fc1c2ee1c2b2d07c0b6f432798b91202718c Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Wed, 20 Sep 2017 09:11:58 -0700 Subject: samples/bpf: Fix pt_regs issues when cross-compiling BPF samples fail to build when cross-compiling for ARM64 because of incorrect pt_regs param selection. This is because clang defines __x86_64__ and bpf_headers thinks we're building for x86. Since clang is building for the BPF target, it shouldn't make assumptions about what target the BPF program is going to run on. To fix this, lets pass ARCH so the header knows which target the BPF program is being compiled for and can use the correct pt_regs code. Acked-by: Alexei Starovoitov Signed-off-by: Joel Fernandes Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/bpf_helpers.h | 56 +++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 36fb9161b34a..4875395b0b52 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -109,7 +109,47 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = static int (*bpf_skb_change_head)(void *, int len, int flags) = (void *) BPF_FUNC_skb_change_head; +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ +#if defined(__TARGET_ARCH_x86) + #define bpf_target_x86 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_s930x) + #define bpf_target_s930x + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm64) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_mips) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__TARGET_ARCH_powerpc) + #define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_sparc) + #define bpf_target_sparc + #define bpf_target_defined +#else + #undef bpf_target_defined +#endif + +/* Fall back to what the compiler says */ +#ifndef bpf_target_defined #if defined(__x86_64__) + #define bpf_target_x86 +#elif defined(__s390x__) + #define bpf_target_s930x +#elif defined(__aarch64__) + #define bpf_target_arm64 +#elif defined(__mips__) + #define bpf_target_mips +#elif defined(__powerpc__) + #define bpf_target_powerpc +#elif defined(__sparc__) + #define bpf_target_sparc +#endif +#endif + +#if defined(bpf_target_x86) #define PT_REGS_PARM1(x) ((x)->di) #define PT_REGS_PARM2(x) ((x)->si) @@ -122,7 +162,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->ip) -#elif defined(__s390x__) +#elif defined(bpf_target_s390x) #define PT_REGS_PARM1(x) ((x)->gprs[2]) #define PT_REGS_PARM2(x) ((x)->gprs[3]) @@ -135,7 +175,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->gprs[15]) #define PT_REGS_IP(x) ((x)->psw.addr) -#elif defined(__aarch64__) +#elif defined(bpf_target_arm64) #define PT_REGS_PARM1(x) ((x)->regs[0]) #define PT_REGS_PARM2(x) ((x)->regs[1]) @@ -148,7 +188,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->pc) -#elif defined(__mips__) +#elif defined(bpf_target_mips) #define PT_REGS_PARM1(x) ((x)->regs[4]) #define PT_REGS_PARM2(x) ((x)->regs[5]) @@ -161,7 +201,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->regs[29]) #define PT_REGS_IP(x) ((x)->cp0_epc) -#elif defined(__powerpc__) +#elif defined(bpf_target_powerpc) #define PT_REGS_PARM1(x) ((x)->gpr[3]) #define PT_REGS_PARM2(x) ((x)->gpr[4]) @@ -172,7 +212,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->nip) -#elif defined(__sparc__) +#elif defined(bpf_target_sparc) #define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) #define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) @@ -182,6 +222,8 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) #define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) #define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) + +/* Should this also be a bpf_target check for the sparc case? 
*/ #if defined(__arch64__) #define PT_REGS_IP(x) ((x)->tpc) #else @@ -190,10 +232,10 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #endif -#ifdef __powerpc__ +#ifdef bpf_target_powerpc #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#elif defined(__sparc__) +#elif bpf_target_sparc #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP #else -- cgit From ac29991ba137cc0e3b0f647fb41e79300230f15c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Sep 2017 02:25:52 +0200 Subject: bpf: update bpf.h uapi header for tools Looks like a couple of updates missed to get carried into tools/include/uapi/, so copy the bpf.h header as usual to pull in latest updates. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S. Miller --- tools/include/uapi/linux/bpf.h | 45 ++++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 461811e57140..e43491ac4823 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -143,12 +143,6 @@ enum bpf_attach_type { #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE -enum bpf_sockmap_flags { - BPF_SOCKMAP_UNSPEC, - BPF_SOCKMAP_STRPARSER, - __MAX_BPF_SOCKMAP_FLAG -}; - /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command * to the given target_fd cgroup the descendent cgroup will be able to * override effective bpf program that was inherited from this cgroup @@ -368,9 +362,20 @@ union bpf_attr { * int bpf_redirect(ifindex, flags) * redirect to another netdev * @ifindex: ifindex of the net device - * @flags: bit 0 - if set, redirect to ingress instead of egress - * other bits - reserved - * Return: TC_ACT_REDIRECT + * @flags: + * cls_bpf: + * bit 0 - if set, redirect to ingress instead of egress + * other bits - reserved + * xdp_bpf: + * all bits - reserved + * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error + * xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error + * int bpf_redirect_map(map, key, flags) + * redirect to endpoint in map + * @map: pointer to dev map + * @key: index in map to lookup + * @flags: -- + * Return: XDP_REDIRECT on success or XDP_ABORT on error * * u32 bpf_get_route_realm(skb) * retrieve a dst's tclassid @@ -577,6 +582,12 @@ union bpf_attr { * @map: pointer to sockmap to update * @key: key to insert/update sock in map * @flags: same flags as map update elem + * + * int bpf_xdp_adjust_meta(xdp_md, delta) + * Adjust the xdp_md.data_meta by delta + * @xdp_md: pointer to xdp_md + * @delta: An positive/negative integer to be added to xdp_md.data_meta + * Return: 0 on success or negative on error */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -632,7 +643,8 @@ union bpf_attr { FN(skb_adjust_room), \ FN(redirect_map), \ FN(sk_redirect_map), \ - FN(sock_map_update), + FN(sock_map_update), \ + FN(xdp_adjust_meta), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -710,7 +722,7 @@ struct __sk_buff { __u32 data_end; __u32 napi_id; - /* accessed by BPF_PROG_TYPE_sk_skb types */ + /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... 
*/ __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ __u32 remote_ip6[4]; /* Stored in network byte order */ __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ + /* ... here. */ + + __u32 data_meta; }; struct bpf_tunnel_key { @@ -753,20 +768,23 @@ struct bpf_sock { __u32 family; __u32 type; __u32 protocol; + __u32 mark; + __u32 priority; }; #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. * A valid XDP program must return one of these defined values. All other - * return codes are reserved for future use. Unknown return codes will result - * in packet drop. + * return codes are reserved for future use. Unknown return codes will + * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). */ enum xdp_action { XDP_ABORTED = 0, XDP_DROP, XDP_PASS, XDP_TX, + XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook @@ -775,6 +793,7 @@ enum xdp_action { struct xdp_md { __u32 data; __u32 data_end; + __u32 data_meta; }; enum sk_action { -- cgit From 22c8852624fc90a90709d1237625ca57999c5092 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Sep 2017 02:25:53 +0200 Subject: bpf: improve selftests and add tests for meta pointer Add various test_verifier selftests, and a simple xdp/tc functional test that is attached to veths. Also let new versions of clang use the recently added -mcpu=probe support [1] for the BPF target, so that it can probe the underlying kernel for BPF insn set extensions. We could also just set this option always, where older versions simply ignore it and give a note to the user that the -mcpu value is not supported, but since emitting the note cannot be turned off from the clang side, let's not confuse users running selftests with it and instead fall back to the default generic CPU when we see that clang doesn't support the probe. Also allow the CPU option to be overridden in the Makefile from the command line. [1] https://github.com/llvm-mirror/llvm/commit/d7276a40d87b89aed89978dec6457a5b8b3a0db5 Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S.
Miller --- tools/testing/selftests/bpf/Makefile | 21 ++- tools/testing/selftests/bpf/bpf_helpers.h | 2 + tools/testing/selftests/bpf/test_verifier.c | 247 +++++++++++++++++++++++++++ tools/testing/selftests/bpf/test_xdp_meta.c | 53 ++++++ tools/testing/selftests/bpf/test_xdp_meta.sh | 51 ++++++ 5 files changed, 370 insertions(+), 4 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_xdp_meta.c create mode 100755 tools/testing/selftests/bpf/test_xdp_meta.sh (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f4b23d697448..924af8d79bde 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -15,9 +15,10 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ - test_pkt_md_access.o test_xdp_redirect.o sockmap_parse_prog.o sockmap_verdict_prog.o + test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ + sockmap_verdict_prog.o -TEST_PROGS := test_kmod.sh test_xdp_redirect.sh +TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh include ../lib.mk @@ -34,8 +35,20 @@ $(BPFOBJ): force $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ CLANG ?= clang +LLC ?= llc + +PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) + +# Let newer LLVM versions transparently probe the kernel for availability +# of full BPF instruction set. +ifeq ($(PROBE),) + CPU ?= probe +else + CPU ?= generic +endif %.o: %.c $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ - -Wno-compare-distinct-pointer-types \ - -O2 -target bpf -c $< -o $@ + -Wno-compare-distinct-pointer-types \ + -O2 -target bpf -emit-llvm -c $< -o - | \ + $(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@ diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 4875395b0b52..a56053db26f5 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -62,6 +62,8 @@ static unsigned long long (*bpf_get_prandom_u32)(void) = (void *) BPF_FUNC_get_prandom_u32; static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = (void *) BPF_FUNC_xdp_adjust_head; +static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) = + (void *) BPF_FUNC_xdp_adjust_meta; static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) = (void *) BPF_FUNC_setsockopt; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 26f3250bdcd2..a0426147523d 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6645,6 +6645,253 @@ static struct bpf_test tests[] = { .errstr = "BPF_END uses reserved fields", .result = REJECT, }, + { + "meta access, test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + 
offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet, off=-8", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test5", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_xdp_adjust_meta), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R3 !read_ok", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test7", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test8", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 
+ offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test9", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test10", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_IMM(BPF_REG_5, 42), + BPF_MOV64_IMM(BPF_REG_6, 24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), + BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test11", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_MOV64_IMM(BPF_REG_5, 42), + BPF_MOV64_IMM(BPF_REG_6, 24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), + BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, + { + "meta access, test12", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + }, }; static int probe_filter_length(const struct bpf_insn *fp) diff --git 
a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/test_xdp_meta.c new file mode 100644 index 000000000000..8d0182650653 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp_meta.c @@ -0,0 +1,53 @@ +#include +#include +#include + +#include "bpf_helpers.h" + +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) +#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem + +SEC("t") +int ing_cls(struct __sk_buff *ctx) +{ + __u8 *data, *data_meta, *data_end; + __u32 diff = 0; + + data_meta = ctx_ptr(ctx, data_meta); + data_end = ctx_ptr(ctx, data_end); + data = ctx_ptr(ctx, data); + + if (data + ETH_ALEN > data_end || + data_meta + round_up(ETH_ALEN, 4) > data) + return TC_ACT_SHOT; + + diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0]; + diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2]; + + return diff ? TC_ACT_SHOT : TC_ACT_OK; +} + +SEC("x") +int ing_xdp(struct xdp_md *ctx) +{ + __u8 *data, *data_meta, *data_end; + int ret; + + ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4)); + if (ret < 0) + return XDP_DROP; + + data_meta = ctx_ptr(ctx, data_meta); + data_end = ctx_ptr(ctx, data_end); + data = ctx_ptr(ctx, data); + + if (data + ETH_ALEN > data_end || + data_meta + round_up(ETH_ALEN, 4) > data) + return XDP_DROP; + + __builtin_memcpy(data_meta, data, ETH_ALEN); + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh new file mode 100755 index 000000000000..307aa856cee3 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp_meta.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +cleanup() +{ + if [ "$?" = "0" ]; then + echo "selftests: test_xdp_meta [PASS]"; + else + echo "selftests: test_xdp_meta [FAILED]"; + fi + + set +e + ip netns del ns1 2> /dev/null + ip netns del ns2 2> /dev/null +} + +ip link set dev lo xdp off 2>/dev/null > /dev/null +if [ $? -ne 0 ];then + echo "selftests: [SKIP] Could not run test without the ip xdp support" + exit 0 +fi +set -e + +ip netns add ns1 +ip netns add ns2 + +trap cleanup 0 2 3 6 9 + +ip link add veth1 type veth peer name veth2 + +ip link set veth1 netns ns1 +ip link set veth2 netns ns2 + +ip netns exec ns1 ip addr add 10.1.1.11/24 dev veth1 +ip netns exec ns2 ip addr add 10.1.1.22/24 dev veth2 + +ip netns exec ns1 tc qdisc add dev veth1 clsact +ip netns exec ns2 tc qdisc add dev veth2 clsact + +ip netns exec ns1 tc filter add dev veth1 ingress bpf da obj test_xdp_meta.o sec t +ip netns exec ns2 tc filter add dev veth2 ingress bpf da obj test_xdp_meta.o sec t + +ip netns exec ns1 ip link set dev veth1 xdp obj test_xdp_meta.o sec x +ip netns exec ns2 ip link set dev veth2 xdp obj test_xdp_meta.o sec x + +ip netns exec ns1 ip link set dev veth1 up +ip netns exec ns2 ip link set dev veth2 up + +ip netns exec ns1 ping -c 1 10.1.1.22 +ip netns exec ns2 ping -c 1 10.1.1.11 + +exit 0 -- cgit From 61f26d92510ea1e30f9b69097b34b6a522daa1d2 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 26 Sep 2017 07:40:42 +0200 Subject: selftests: rtnetlink.sh: add rudimentary vrf test Acked-by: David Ahern Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/rtnetlink.sh | 42 ++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 4b48de565cae..a048f7a5f94c 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -291,6 +291,47 @@ kci_test_ifalias() echo "PASS: set ifalias $namewant for $devdummy" } +kci_test_vrf() +{ + vrfname="test-vrf" + ret=0 + + ip link show type vrf 2>/dev/null + if [ $? -ne 0 ]; then + echo "SKIP: vrf: iproute2 too old" + return 0 + fi + + ip link add "$vrfname" type vrf table 10 + check_err $? + if [ $ret -ne 0 ];then + echo "FAIL: can't add vrf interface, skipping test" + return 0 + fi + + ip -br link show type vrf | grep -q "$vrfname" + check_err $? + if [ $ret -ne 0 ];then + echo "FAIL: created vrf device not found" + return 1 + fi + + ip link set dev "$vrfname" up + check_err $? + + ip link set dev "$devdummy" master "$vrfname" + check_err $? + ip link del dev "$vrfname" + check_err $? + + if [ $ret -ne 0 ];then + echo "FAIL: vrf" + return 1 + fi + + echo "PASS: vrf" +} + kci_test_rtnl() { kci_add_dummy @@ -306,6 +347,7 @@ kci_test_rtnl() kci_test_bridge kci_test_addrlabel kci_test_ifalias + kci_test_vrf kci_del_dummy } -- cgit From 88cda1c9da02c8aa31e1d5dcf22e8a35cc8c19f2 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 27 Sep 2017 14:37:54 -0700 Subject: bpf: libbpf: Provide basic API support to specify BPF obj name This patch extends libbpf to provide API support for specifying a BPF object name. In tools/lib/bpf/libbpf, the C symbol of the function and the map is used. Regarding section names, all maps are placed in the same section named "maps", so a section name is not a good choice for a map's name. To be consistent with maps, bpf_prog also uses its function symbol as the prog's name. This patch adds logic to collect function symbols in libbpf. There is existing code to collect the map symbols, so no change is needed there. The bpf_load_program_name() and bpf_create_map_name() functions are added to take the name argument. For the other bpf_create_map_xxx() variants, a name argument is added directly to them. In samples/bpf, bpf_load.c in particular, the symbol is also used as the map's name and the map symbols have already been collected in the existing code. For bpf_prog, bpf_load.c does not collect the function symbol name. We can consider collecting them later if there is a need to continue supporting bpf_load.c. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S.
Miller --- tools/include/uapi/linux/bpf.h | 10 +++ tools/lib/bpf/bpf.c | 57 +++++++++++---- tools/lib/bpf/bpf.h | 23 ++++-- tools/lib/bpf/libbpf.c | 109 +++++++++++++++++++++------- tools/testing/selftests/bpf/test_verifier.c | 2 +- 5 files changed, 154 insertions(+), 47 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e43491ac4823..6d2137b4cf38 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -175,6 +175,8 @@ enum bpf_attach_type { /* Specify numa node during map creation */ #define BPF_F_NUMA_NODE (1U << 2) +#define BPF_OBJ_NAME_LEN 16U + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -188,6 +190,7 @@ union bpf_attr { __u32 numa_node; /* numa node (effective only if * BPF_F_NUMA_NODE is set). */ + __u8 map_name[BPF_OBJ_NAME_LEN]; }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -210,6 +213,7 @@ union bpf_attr { __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* checked when prog_type=kprobe */ __u32 prog_flags; + __u8 prog_name[BPF_OBJ_NAME_LEN]; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -812,6 +816,11 @@ struct bpf_prog_info { __u32 xlated_prog_len; __aligned_u64 jited_prog_insns; __aligned_u64 xlated_prog_insns; + __u64 load_time; /* ns since boottime */ + __u32 created_by_uid; + __u32 nr_map_ids; + __aligned_u64 map_ids; + __u8 name[BPF_OBJ_NAME_LEN]; } __attribute__((aligned(8))); struct bpf_map_info { @@ -821,6 +830,7 @@ struct bpf_map_info { __u32 value_size; __u32 max_entries; __u32 map_flags; + __u8 name[BPF_OBJ_NAME_LEN]; } __attribute__((aligned(8))); /* User bpf_sock_ops struct to access socket values and specify request ops diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 1d6907d379c9..daf624e4c720 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -46,6 +46,8 @@ # endif #endif +#define min(x, y) ((x) < (y) ? (x) : (y)) + static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; @@ -57,10 +59,11 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, return syscall(__NR_bpf, cmd, attr, size); } -int bpf_create_map_node(enum bpf_map_type map_type, int key_size, - int value_size, int max_entries, __u32 map_flags, - int node) +int bpf_create_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags, int node) { + __u32 name_len = name ? 
strlen(name) : 0; union bpf_attr attr; memset(&attr, '\0', sizeof(attr)); @@ -70,6 +73,8 @@ int bpf_create_map_node(enum bpf_map_type map_type, int key_size, attr.value_size = value_size; attr.max_entries = max_entries; attr.map_flags = map_flags; + memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1)); + if (node >= 0) { attr.map_flags |= BPF_F_NUMA_NODE; attr.numa_node = node; @@ -81,14 +86,23 @@ int bpf_create_map_node(enum bpf_map_type map_type, int key_size, int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags) { - return bpf_create_map_node(map_type, key_size, value_size, + return bpf_create_map_node(map_type, NULL, key_size, value_size, max_entries, map_flags, -1); } -int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, - int inner_map_fd, int max_entries, +int bpf_create_map_name(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags) +{ + return bpf_create_map_node(map_type, name, key_size, value_size, + max_entries, map_flags, -1); +} + +int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node) { + __u32 name_len = name ? strlen(name) : 0; union bpf_attr attr; memset(&attr, '\0', sizeof(attr)); @@ -99,6 +113,8 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, attr.inner_map_fd = inner_map_fd; attr.max_entries = max_entries; attr.map_flags = map_flags; + memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1)); + if (node >= 0) { attr.map_flags |= BPF_F_NUMA_NODE; attr.numa_node = node; @@ -107,19 +123,24 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } -int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, - int inner_map_fd, int max_entries, __u32 map_flags) +int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, + __u32 map_flags) { - return bpf_create_map_in_map_node(map_type, key_size, inner_map_fd, - max_entries, map_flags, -1); + return bpf_create_map_in_map_node(map_type, name, key_size, + inner_map_fd, max_entries, map_flags, + -1); } -int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, - size_t insns_cnt, const char *license, - __u32 kern_version, char *log_buf, size_t log_buf_sz) +int bpf_load_program_name(enum bpf_prog_type type, const char *name, + const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) { int fd; union bpf_attr attr; + __u32 name_len = name ? 
strlen(name) : 0; bzero(&attr, sizeof(attr)); attr.prog_type = type; @@ -130,6 +151,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, attr.log_size = 0; attr.log_level = 0; attr.kern_version = kern_version; + memcpy(attr.prog_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1)); fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); if (fd >= 0 || !log_buf || !log_buf_sz) @@ -143,6 +165,15 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); } +int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) +{ + return bpf_load_program_name(type, NULL, insns, insns_cnt, license, + kern_version, log_buf, log_buf_sz); +} + int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, int strict_alignment, const char *license, __u32 kern_version, diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index b8ea5843c39e..118d00535a0d 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -24,19 +24,28 @@ #include #include -int bpf_create_map_node(enum bpf_map_type map_type, int key_size, - int value_size, int max_entries, __u32 map_flags, - int node); +int bpf_create_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags, int node); +int bpf_create_map_name(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags); int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); -int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, - int inner_map_fd, int max_entries, +int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node); -int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, - int inner_map_fd, int max_entries, __u32 map_flags); +int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, + __u32 map_flags); /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE 65536 +int bpf_load_program_name(enum bpf_prog_type type, const char *name, + const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz); int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 35f6dfcdc565..4f402dcdf372 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -171,6 +171,7 @@ int libbpf_strerror(int err, char *buf, size_t size) struct bpf_program { /* Index in elf obj file, for relocation use. 
*/ int idx; + char *name; char *section_name; struct bpf_insn *insns; size_t insns_cnt; @@ -283,6 +284,7 @@ static void bpf_program__exit(struct bpf_program *prog) prog->clear_priv = NULL; bpf_program__unload(prog); + zfree(&prog->name); zfree(&prog->section_name); zfree(&prog->insns); zfree(&prog->reloc_desc); @@ -293,26 +295,27 @@ static void bpf_program__exit(struct bpf_program *prog) } static int -bpf_program__init(void *data, size_t size, char *name, int idx, - struct bpf_program *prog) +bpf_program__init(void *data, size_t size, char *section_name, int idx, + struct bpf_program *prog) { if (size < sizeof(struct bpf_insn)) { - pr_warning("corrupted section '%s'\n", name); + pr_warning("corrupted section '%s'\n", section_name); return -EINVAL; } bzero(prog, sizeof(*prog)); - prog->section_name = strdup(name); + prog->section_name = strdup(section_name); if (!prog->section_name) { - pr_warning("failed to alloc name for prog %s\n", - name); + pr_warning("failed to alloc name for prog under section %s\n", + section_name); goto errout; } prog->insns = malloc(size); if (!prog->insns) { - pr_warning("failed to alloc insns for %s\n", name); + pr_warning("failed to alloc insns for prog under section %s\n", + section_name); goto errout; } prog->insns_cnt = size / sizeof(struct bpf_insn); @@ -331,12 +334,12 @@ errout: static int bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, - char *name, int idx) + char *section_name, int idx) { struct bpf_program prog, *progs; int nr_progs, err; - err = bpf_program__init(data, size, name, idx, &prog); + err = bpf_program__init(data, size, section_name, idx, &prog); if (err) return err; @@ -350,8 +353,8 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, * is still valid, so don't need special treat for * bpf_close_object(). 
*/ - pr_warning("failed to alloc a new program '%s'\n", - name); + pr_warning("failed to alloc a new program under section '%s'\n", + section_name); bpf_program__exit(&prog); return -ENOMEM; } @@ -364,6 +367,54 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, return 0; } +static int +bpf_object__init_prog_names(struct bpf_object *obj) +{ + Elf_Data *symbols = obj->efile.symbols; + struct bpf_program *prog; + size_t pi, si; + + for (pi = 0; pi < obj->nr_programs; pi++) { + char *name = NULL; + + prog = &obj->programs[pi]; + + for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; + si++) { + GElf_Sym sym; + + if (!gelf_getsym(symbols, si, &sym)) + continue; + if (sym.st_shndx != prog->idx) + continue; + + name = elf_strptr(obj->efile.elf, + obj->efile.strtabidx, + sym.st_name); + if (!name) { + pr_warning("failed to get sym name string for prog %s\n", + prog->section_name); + return -LIBBPF_ERRNO__LIBELF; + } + } + + if (!name) { + pr_warning("failed to find sym for prog %s\n", + prog->section_name); + return -EINVAL; + } + + prog->name = strdup(name); + if (!prog->name) { + pr_warning("failed to allocate memory for prog sym %s\n", + name); + return -ENOMEM; + } + } + + return 0; +} + static struct bpf_object *bpf_object__new(const char *path, void *obj_buf, size_t obj_buf_sz) @@ -766,8 +817,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj) pr_warning("Corrupted ELF file: index of strtab invalid\n"); return LIBBPF_ERRNO__FORMAT; } - if (obj->efile.maps_shndx >= 0) + if (obj->efile.maps_shndx >= 0) { err = bpf_object__init_maps(obj); + if (err) + goto out; + } + err = bpf_object__init_prog_names(obj); out: return err; } @@ -870,11 +925,12 @@ bpf_object__create_maps(struct bpf_object *obj) struct bpf_map_def *def = &obj->maps[i].def; int *pfd = &obj->maps[i].fd; - *pfd = bpf_create_map(def->type, - def->key_size, - def->value_size, - def->max_entries, - 0); + *pfd = bpf_create_map_name(def->type, + obj->maps[i].name, + def->key_size, + def->value_size, + def->max_entries, + 0); if (*pfd < 0) { size_t j; int err = *pfd; @@ -982,7 +1038,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj) } static int -load_program(enum bpf_prog_type type, struct bpf_insn *insns, +load_program(enum bpf_prog_type type, const char *name, struct bpf_insn *insns, int insns_cnt, char *license, u32 kern_version, int *pfd) { int ret; @@ -995,8 +1051,8 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns, if (!log_buf) pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); - ret = bpf_load_program(type, insns, insns_cnt, license, - kern_version, log_buf, BPF_LOG_BUF_SIZE); + ret = bpf_load_program_name(type, name, insns, insns_cnt, license, + kern_version, log_buf, BPF_LOG_BUF_SIZE); if (ret >= 0) { *pfd = ret; @@ -1021,9 +1077,9 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns, if (type != BPF_PROG_TYPE_KPROBE) { int fd; - fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns, - insns_cnt, license, kern_version, - NULL, 0); + fd = bpf_load_program_name(BPF_PROG_TYPE_KPROBE, name, + insns, insns_cnt, license, + kern_version, NULL, 0); if (fd >= 0) { close(fd); ret = -LIBBPF_ERRNO__PROGTYPE; @@ -1067,8 +1123,8 @@ bpf_program__load(struct bpf_program *prog, pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", prog->section_name, prog->instances.nr); } - err = load_program(prog->type, prog->insns, prog->insns_cnt, - license, kern_version, &fd); + err = load_program(prog->type, prog->name, prog->insns, + 
prog->insns_cnt, license, kern_version, &fd); if (!err) prog->instances.fds[0] = fd; goto out; @@ -1096,7 +1152,8 @@ bpf_program__load(struct bpf_program *prog, continue; } - err = load_program(prog->type, result.new_insn_ptr, + err = load_program(prog->type, prog->name, + result.new_insn_ptr, result.new_insn_cnt, license, kern_version, &fd); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index a0426147523d..290d5056c165 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6939,7 +6939,7 @@ static int create_map_in_map(void) return inner_map_fd; } - outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, sizeof(int), inner_map_fd, 1, 0); if (outer_map_fd < 0) printf("Failed to create array of maps '%s'!\n", -- cgit From 6e525d06674a21468b4e728ef880770a0ca5c60d Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 27 Sep 2017 14:37:55 -0700 Subject: bpf: Swap the order of checking prog_info and map_info This patch swaps the checking order. It now checks the map_info first and then prog_info. It is a prep work for adding test to the newly added fields (the map_ids of prog_info field in particular). Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_progs.c | 58 +++++++++++++++++--------------- 1 file changed, 30 insertions(+), 28 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 11ee25cea227..31ae27dc8d04 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -316,6 +316,36 @@ static void test_bpf_obj_id(void) error_cnt++; assert(!err); + /* Insert a magic value to the map */ + map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); + assert(map_fds[i] >= 0); + err = bpf_map_update_elem(map_fds[i], &array_key, + &array_magic_value, 0); + assert(!err); + + /* Check getting map info */ + info_len = sizeof(struct bpf_map_info) * 2; + bzero(&map_infos[i], info_len); + err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], + &info_len); + if (CHECK(err || + map_infos[i].type != BPF_MAP_TYPE_ARRAY || + map_infos[i].key_size != sizeof(__u32) || + map_infos[i].value_size != sizeof(__u64) || + map_infos[i].max_entries != 1 || + map_infos[i].map_flags != 0 || + info_len != sizeof(struct bpf_map_info), + "get-map-info(fd)", + "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n", + err, errno, + map_infos[i].type, BPF_MAP_TYPE_ARRAY, + info_len, sizeof(struct bpf_map_info), + map_infos[i].key_size, + map_infos[i].value_size, + map_infos[i].max_entries, + map_infos[i].map_flags)) + goto done; + /* Check getting prog info */ info_len = sizeof(struct bpf_prog_info) * 2; bzero(&prog_infos[i], info_len); @@ -347,34 +377,6 @@ static void test_bpf_obj_id(void) !!memcmp(xlated_insns, zeros, sizeof(zeros)))) goto done; - map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); - assert(map_fds[i] >= 0); - err = bpf_map_update_elem(map_fds[i], &array_key, - &array_magic_value, 0); - assert(!err); - - /* Check getting map info */ - info_len = sizeof(struct bpf_map_info) * 2; - bzero(&map_infos[i], info_len); - err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], - &info_len); - if (CHECK(err || - map_infos[i].type != 
BPF_MAP_TYPE_ARRAY || - map_infos[i].key_size != sizeof(__u32) || - map_infos[i].value_size != sizeof(__u64) || - map_infos[i].max_entries != 1 || - map_infos[i].map_flags != 0 || - info_len != sizeof(struct bpf_map_info), - "get-map-info(fd)", - "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n", - err, errno, - map_infos[i].type, BPF_MAP_TYPE_ARRAY, - info_len, sizeof(struct bpf_map_info), - map_infos[i].key_size, - map_infos[i].value_size, - map_infos[i].max_entries, - map_infos[i].map_flags)) - goto done; } /* Check bpf_prog_get_next_id() */ -- cgit From 3a8ad560a9674235d1dd2e174434f540924e249f Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 27 Sep 2017 14:37:56 -0700 Subject: bpf: Test new fields in bpf_attr and bpf_{prog, map}_info This patch tests newly added fields of the bpf_attr, bpf_prog_info and bpf_map_info. Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_progs.c | 143 ++++++++++++++++++++++++++++--- 1 file changed, 132 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 31ae27dc8d04..69427531408d 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -10,6 +10,7 @@ #include #include #include +#include #include typedef __u16 __sum16; @@ -19,6 +20,8 @@ typedef __u16 __sum16; #include #include #include +#include +#include #include #include @@ -273,16 +276,26 @@ static void test_bpf_obj_id(void) const int nr_iters = 2; const char *file = "./test_obj_id.o"; const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; + const char *expected_prog_name = "test_obj_id"; + const char *expected_map_name = "test_map_id"; + const __u64 nsec_per_sec = 1000000000; struct bpf_object *objs[nr_iters]; int prog_fds[nr_iters], map_fds[nr_iters]; /* +1 to test for the info_len returned by kernel */ struct bpf_prog_info prog_infos[nr_iters + 1]; struct bpf_map_info map_infos[nr_iters + 1]; + /* Each prog only uses one map. +1 to test nr_map_ids + * returned by kernel. + */ + __u32 map_ids[nr_iters + 1]; char jited_insns[128], xlated_insns[128], zeros[128]; __u32 i, next_id, info_len, nr_id_found, duration = 0; + struct timespec real_time_ts, boot_time_ts; int sysctl_fd, jit_enabled = 0, err = 0; __u64 array_value; + uid_t my_uid = getuid(); + time_t now, load_time; sysctl_fd = open(jit_sysctl, 0, O_RDONLY); if (sysctl_fd != -1) { @@ -307,6 +320,7 @@ static void test_bpf_obj_id(void) /* Check bpf_obj_get_info_by_fd() */ bzero(zeros, sizeof(zeros)); for (i = 0; i < nr_iters; i++) { + now = time(NULL); err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, &objs[i], &prog_fds[i]); /* test_obj_id.o is a dumb prog. 
It should never fail @@ -334,16 +348,18 @@ static void test_bpf_obj_id(void) map_infos[i].value_size != sizeof(__u64) || map_infos[i].max_entries != 1 || map_infos[i].map_flags != 0 || - info_len != sizeof(struct bpf_map_info), + info_len != sizeof(struct bpf_map_info) || + strcmp((char *)map_infos[i].name, expected_map_name), "get-map-info(fd)", - "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n", + "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", err, errno, map_infos[i].type, BPF_MAP_TYPE_ARRAY, info_len, sizeof(struct bpf_map_info), map_infos[i].key_size, map_infos[i].value_size, map_infos[i].max_entries, - map_infos[i].map_flags)) + map_infos[i].map_flags, + map_infos[i].name, expected_map_name)) goto done; /* Check getting prog info */ @@ -355,8 +371,16 @@ static void test_bpf_obj_id(void) prog_infos[i].jited_prog_len = sizeof(jited_insns); prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); prog_infos[i].xlated_prog_len = sizeof(xlated_insns); + prog_infos[i].map_ids = ptr_to_u64(map_ids + i); + prog_infos[i].nr_map_ids = 2; + err = clock_gettime(CLOCK_REALTIME, &real_time_ts); + assert(!err); + err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); + assert(!err); err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], &info_len); + load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + + (prog_infos[i].load_time / nsec_per_sec); if (CHECK(err || prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || info_len != sizeof(struct bpf_prog_info) || @@ -364,9 +388,14 @@ static void test_bpf_obj_id(void) (jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))) || !prog_infos[i].xlated_prog_len || - !memcmp(xlated_insns, zeros, sizeof(zeros)), + !memcmp(xlated_insns, zeros, sizeof(zeros)) || + load_time < now - 60 || load_time > now + 60 || + prog_infos[i].created_by_uid != my_uid || + prog_infos[i].nr_map_ids != 1 || + *(int *)prog_infos[i].map_ids != map_infos[i].id || + strcmp((char *)prog_infos[i].name, expected_prog_name), "get-prog-info(fd)", - "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d\n", + "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", err, errno, i, prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, info_len, sizeof(struct bpf_prog_info), @@ -374,9 +403,13 @@ static void test_bpf_obj_id(void) prog_infos[i].jited_prog_len, prog_infos[i].xlated_prog_len, !!memcmp(jited_insns, zeros, sizeof(zeros)), - !!memcmp(xlated_insns, zeros, sizeof(zeros)))) + !!memcmp(xlated_insns, zeros, sizeof(zeros)), + load_time, now, + prog_infos[i].created_by_uid, my_uid, + prog_infos[i].nr_map_ids, 1, + *(int *)prog_infos[i].map_ids, map_infos[i].id, + prog_infos[i].name, expected_prog_name)) goto done; - } /* Check bpf_prog_get_next_id() */ @@ -384,6 +417,7 @@ static void test_bpf_obj_id(void) next_id = 0; while (!bpf_prog_get_next_id(next_id, &next_id)) { struct bpf_prog_info prog_info = {}; + __u32 saved_map_id; int prog_fd; info_len = sizeof(prog_info); @@ -406,16 +440,33 @@ static void test_bpf_obj_id(void) nr_id_found++; + /* Negative test: + * prog_info.nr_map_ids = 1 + * prog_info.map_ids = NULL + */ + prog_info.nr_map_ids = 1; + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + if (CHECK(!err || errno != 
EFAULT, + "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", + err, errno, EFAULT)) + break; + bzero(&prog_info, sizeof(prog_info)); + info_len = sizeof(prog_info); + + saved_map_id = *(int *)(prog_infos[i].map_ids); + prog_info.map_ids = prog_infos[i].map_ids; + prog_info.nr_map_ids = 2; err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); prog_infos[i].jited_prog_insns = 0; prog_infos[i].xlated_prog_insns = 0; CHECK(err || info_len != sizeof(struct bpf_prog_info) || - memcmp(&prog_info, &prog_infos[i], info_len), + memcmp(&prog_info, &prog_infos[i], info_len) || + *(int *)prog_info.map_ids != saved_map_id, "get-prog-info(next_id->fd)", - "err %d errno %d info_len %u(%lu) memcmp %d\n", + "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n", err, errno, info_len, sizeof(struct bpf_prog_info), - memcmp(&prog_info, &prog_infos[i], info_len)); - + memcmp(&prog_info, &prog_infos[i], info_len), + *(int *)prog_info.map_ids, saved_map_id); close(prog_fd); } CHECK(nr_id_found != nr_iters, @@ -497,6 +548,75 @@ static void test_pkt_md_access(void) bpf_object__close(obj); } +static void test_obj_name(void) +{ + struct { + const char *name; + int success; + int expected_errno; + } tests[] = { + { "", 1, 0 }, + { "_123456789ABCDE", 1, 0 }, + { "_123456789ABCDEF", 0, EINVAL }, + { "_123456789ABCD\n", 0, EINVAL }, + }; + struct bpf_insn prog[] = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + __u32 duration = 0; + int i; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + size_t name_len = strlen(tests[i].name) + 1; + union bpf_attr attr; + size_t ncopy; + int fd; + + /* test different attr.prog_name during BPF_PROG_LOAD */ + ncopy = name_len < sizeof(attr.prog_name) ? + name_len : sizeof(attr.prog_name); + bzero(&attr, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; + attr.insn_cnt = 2; + attr.insns = ptr_to_u64(prog); + attr.license = ptr_to_u64(""); + memcpy(attr.prog_name, tests[i].name, ncopy); + + fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd != -1) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-prog-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd != -1) + close(fd); + + /* test different attr.map_name during BPF_MAP_CREATE */ + ncopy = name_len < sizeof(attr.map_name) ? + name_len : sizeof(attr.map_name); + bzero(&attr, sizeof(attr)); + attr.map_type = BPF_MAP_TYPE_ARRAY; + attr.key_size = 4; + attr.value_size = 4; + attr.max_entries = 1; + attr.map_flags = 0; + memcpy(attr.map_name, tests[i].name, ncopy); + fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd != -1) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-map-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd != -1) + close(fd); + } +} + int main(void) { struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; @@ -509,6 +629,7 @@ int main(void) test_tcp_estats(); test_bpf_obj_id(); test_pkt_md_access(); + test_obj_name(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); return error_cnt ? 
EXIT_FAILURE : EXIT_SUCCESS; -- cgit From 6227efc1a20b24a21ce8e9122c0569696d644aff Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 2 Oct 2017 12:05:29 +0200 Subject: selftests: rtnetlink.sh: add vxlan and fou test cases fou test lifted from ip-fou man page. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 96 ++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index a048f7a5f94c..62c87da92770 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -332,6 +332,101 @@ kci_test_vrf() echo "PASS: vrf" } +kci_test_encap_vxlan() +{ + ret=0 + vxlan="test-vxlan0" + vlan="test-vlan0" + testns="$1" + + ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \ + dev "$devdummy" dstport 4789 2>/dev/null + if [ $? -ne 0 ]; then + echo "FAIL: can't add vxlan interface, skipping test" + return 0 + fi + check_err $? + + ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan" + check_err $? + + ip netns exec "$testns" ip link set up dev "$vxlan" + check_err $? + + ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1 + check_err $? + + ip netns exec "$testns" ip link del "$vxlan" + check_err $? + + if [ $ret -ne 0 ]; then + echo "FAIL: vxlan" + return 1 + fi + echo "PASS: vxlan" +} + +kci_test_encap_fou() +{ + ret=0 + name="test-fou" + testns="$1" + + ip fou help 2>&1 |grep -q 'Usage: ip fou' + if [ $? -ne 0 ];then + echo "SKIP: fou: iproute2 too old" + return 1 + fi + + ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null + if [ $? -ne 0 ];then + echo "FAIL: can't add fou port 7777, skipping test" + return 1 + fi + + ip netns exec "$testns" ip fou add port 8888 ipproto 4 + check_err $? + + ip netns exec "$testns" ip fou del port 9999 2>/dev/null + check_fail $? + + ip netns exec "$testns" ip fou del port 7777 + check_err $? + + if [ $ret -ne 0 ]; then + echo "FAIL: fou" + return 1 + fi + + echo "PASS: fou" +} + +# test various encap methods, use netns to avoid unwanted interference +kci_test_encap() +{ + testns="testns" + ret=0 + + ip netns add "$testns" + if [ $? -ne 0 ]; then + echo "SKIP encap tests: cannot add net namespace $testns" + return 1 + fi + + ip netns exec "$testns" ip link set lo up + check_err $? + + ip netns exec "$testns" ip link add name "$devdummy" type dummy + check_err $? + ip netns exec "$testns" ip link set "$devdummy" up + check_err $? + + kci_test_encap_vxlan "$testns" + kci_test_encap_fou "$testns" + + ip netns del "$testns" +} + kci_test_rtnl() { kci_add_dummy @@ -348,6 +443,7 @@ kci_test_rtnl() kci_test_addrlabel kci_test_ifalias kci_test_vrf + kci_test_encap kci_del_dummy } -- cgit From e9b871ee098dfb91780e13bfc6e91e82c7b4ad73 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 4 Oct 2017 16:22:59 +0200 Subject: selftests: rtnetlink: try concurrent change of ifalias to make sure this is serialized correctly. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/rtnetlink.sh | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 62c87da92770..e8c86c416ed0 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -278,6 +278,12 @@ kci_test_ifalias() ip link show "$devdummy" | grep -q "alias $namewant" check_fail $? + for i in $(seq 1 100); do + uuidgen > "$syspathname" & + done + + wait + # re-add the alias -- kernel should free mem when dummy dev is removed ip link set dev "$devdummy" alias "$namewant" check_err $? -- cgit From 390ee7e29fc8e6e90d3065b00afb047c4ee552f9 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 2 Oct 2017 22:50:23 -0700 Subject: bpf: enforce return code for cgroup-bpf programs with addition of tnum logic the verifier got smart enough and we can enforce return codes at program load time. For now do so for cgroup-bpf program types. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_verifier.c | 72 +++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 290d5056c165..cc91d0159f43 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6892,6 +6892,78 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, }, + { + "bpf_exit with invalid return code. test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x0; 0xffffffff)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x0; 0x3)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. test4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. test5", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x2; 0x0)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. test6", + .insns = { + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R0 is not a known value (ctx)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, + { + "bpf_exit with invalid return code. 
test7", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4), + BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has unknown scalar value", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit From 244d20efdb68c5c1a26c667baeb232ea163e2f69 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 2 Oct 2017 22:50:24 -0700 Subject: libbpf: introduce bpf_prog_detach2() introduce bpf_prog_detach2() that takes one more argument prog_fd vs bpf_prog_detach() that takes only attach_fd and type. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/lib/bpf/bpf.c | 12 ++++++++++++ tools/lib/bpf/bpf.h | 1 + 2 files changed, 13 insertions(+) (limited to 'tools') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index daf624e4c720..d4b6ba8292ee 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -291,6 +291,18 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type) return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } +int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) +{ + union bpf_attr attr; + + bzero(&attr, sizeof(attr)); + attr.target_fd = target_fd; + attr.attach_bpf_fd = prog_fd; + attr.attach_type = type; + + return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); +} + int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 118d00535a0d..afd64727c9cf 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -66,6 +66,7 @@ int bpf_obj_get(const char *pathname); int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); +int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type); int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration); -- cgit From defd9c476fa6b01b4eb5450452bfd202138decb7 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 2 Oct 2017 22:50:26 -0700 Subject: libbpf: sync bpf.h tools/include/uapi/linux/bpf.h got out of sync with actual kernel header. Update it. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/include/uapi/linux/bpf.h | 55 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 6d2137b4cf38..cb2b9f95160a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -92,6 +92,7 @@ enum bpf_cmd { BPF_PROG_GET_FD_BY_ID, BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, + BPF_PROG_QUERY, }; enum bpf_map_type { @@ -143,11 +144,47 @@ enum bpf_attach_type { #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE -/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command - * to the given target_fd cgroup the descendent cgroup will be able to - * override effective bpf program that was inherited from this cgroup +/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command + * + * NONE(default): No further bpf programs allowed in the subtree. 
+ * + * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, + * the program in this cgroup yields to sub-cgroup program. + * + * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, + * that cgroup program gets run in addition to the program in this cgroup. + * + * Only one program is allowed to be attached to a cgroup with + * NONE or BPF_F_ALLOW_OVERRIDE flag. + * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will + * release old program and attach the new one. Attach flags has to match. + * + * Multiple programs are allowed to be attached to a cgroup with + * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order + * (those that were attached first, run first) + * The programs of sub-cgroup are executed first, then programs of + * this cgroup and then programs of parent cgroup. + * When children program makes decision (like picking TCP CA or sock bind) + * parent program has a chance to override it. + * + * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. + * A cgroup with NONE doesn't allow any programs in sub-cgroups. + * Ex1: + * cgrp1 (MULTI progs A, B) -> + * cgrp2 (OVERRIDE prog C) -> + * cgrp3 (MULTI prog D) -> + * cgrp4 (OVERRIDE prog E) -> + * cgrp5 (NONE prog F) + * the event in cgrp5 triggers execution of F,D,A,B in that order. + * if prog F is detached, the execution is E,D,A,B + * if prog F and D are detached, the execution is E,A,B + * if prog F, E and D are detached, the execution is C,A,B + * + * All eligible programs are executed regardless of return code from + * earlier programs. */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) +#define BPF_F_ALLOW_MULTI (1U << 1) /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the * verifier will perform strict alignment checking as if the kernel @@ -175,6 +212,9 @@ enum bpf_attach_type { /* Specify numa node during map creation */ #define BPF_F_NUMA_NODE (1U << 2) +/* flags for BPF_PROG_QUERY */ +#define BPF_F_QUERY_EFFECTIVE (1U << 0) + #define BPF_OBJ_NAME_LEN 16U union bpf_attr { @@ -253,6 +293,15 @@ union bpf_attr { __u32 info_len; __aligned_u64 info; } info; + + struct { /* anonymous struct used by BPF_PROG_QUERY command */ + __u32 target_fd; /* container object to query */ + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __aligned_u64 prog_ids; + __u32 prog_cnt; + } query; } __attribute__((aligned(8))); /* BPF helper function descriptions: -- cgit From 5d0cbf9b6c11964bf2f4041a5b0b6f81cb169736 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 2 Oct 2017 22:50:27 -0700 Subject: libbpf: add support for BPF_PROG_QUERY add support for BPF_PROG_QUERY command to libbpf Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
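Before the libbpf diff below, a minimal sketch to make the cgroup attach-flag semantics documented in the uapi bpf.h comment above concrete: a parent cgroup using BPF_F_ALLOW_MULTI and a child cgroup using BPF_F_ALLOW_OVERRIDE. It only relies on calls shown in these commits (bpf_prog_attach() and the new bpf_prog_detach2()) and on flags and attach types from the synced header; the cgroup paths, the already-loaded program fds and the minimal error handling are illustrative assumptions, not part of any patch in this series.

/* Sketch: multi-attach to a parent cgroup, one overridable program on a
 * child cgroup. prog_fd_a/b/c are assumed to be loaded cgroup/sock programs.
 */
#include <fcntl.h>
#include <unistd.h>
#include <linux/bpf.h>
#include "bpf.h"	/* tools/lib/bpf/bpf.h */

static int cgroup_attach_sketch(int prog_fd_a, int prog_fd_b, int prog_fd_c)
{
	int parent = open("/sys/fs/cgroup/parent", O_RDONLY);
	int child = open("/sys/fs/cgroup/parent/child", O_RDONLY);
	int err = -1;

	if (parent < 0 || child < 0)
		goto out;

	/* Two programs on the parent: for events in the parent cgroup both
	 * run, in the order they were attached (A first, then B).
	 */
	err = bpf_prog_attach(prog_fd_a, parent, BPF_CGROUP_INET_SOCK_CREATE,
			      BPF_F_ALLOW_MULTI);
	if (!err)
		err = bpf_prog_attach(prog_fd_b, parent,
				      BPF_CGROUP_INET_SOCK_CREATE,
				      BPF_F_ALLOW_MULTI);

	/* One overridable program on the child: for events in the child
	 * cgroup it runs first, followed by the parent's A and B.
	 */
	if (!err)
		err = bpf_prog_attach(prog_fd_c, child,
				      BPF_CGROUP_INET_SOCK_CREATE,
				      BPF_F_ALLOW_OVERRIDE);

	/* With BPF_F_ALLOW_MULTI several programs can sit on the same
	 * cgroup, so detaching has to name the program to remove; that is
	 * what bpf_prog_detach2() adds over bpf_prog_detach().
	 */
	if (!err)
		err = bpf_prog_detach2(prog_fd_b, parent,
				       BPF_CGROUP_INET_SOCK_CREATE);
out:
	if (child >= 0)
		close(child);
	if (parent >= 0)
		close(parent);
	return err;
}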
Miller --- tools/lib/bpf/bpf.c | 20 ++++++++++++++++++++ tools/lib/bpf/bpf.h | 3 ++- 2 files changed, 22 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index d4b6ba8292ee..5128677e4117 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -303,6 +303,26 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } +int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, + __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) +{ + union bpf_attr attr; + int ret; + + bzero(&attr, sizeof(attr)); + attr.query.target_fd = target_fd; + attr.query.attach_type = type; + attr.query.query_flags = query_flags; + attr.query.prog_cnt = *prog_cnt; + attr.query.prog_ids = ptr_to_u64(prog_ids); + + ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)); + if (attach_flags) + *attach_flags = attr.query.attach_flags; + *prog_cnt = attr.query.prog_cnt; + return ret; +} + int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index afd64727c9cf..6534889e2b2f 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -75,5 +75,6 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); int bpf_prog_get_fd_by_id(__u32 id); int bpf_map_get_fd_by_id(__u32 id); int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len); - +int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, + __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt); #endif -- cgit From a92bb546cff0b31d96d5919ebfeda9c678756b42 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 Oct 2017 20:10:03 -0700 Subject: tools: rename tools/net directory to tools/bpf We currently only have BPF tools in the tools/net directory. We are about to add more BPF tools there, not necessarily networking related, rename the directory and related Makefile targets to bpf. Suggested-by: Daniel Borkmann Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
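Before the tools/net to tools/bpf rename diff below, a short usage sketch for the bpf_prog_query() wrapper added above. As the wrapper shows, prog_cnt is both an input (the capacity of the caller's prog_ids array) and an output (the count the kernel reports back). The cgroup path, array size and attach type here are illustrative assumptions, not something mandated by the patch.

/* Sketch: list program ids effectively attached to a cgroup. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include "bpf.h"	/* tools/lib/bpf/bpf.h */

static void cgroup_query_sketch(void)
{
	__u32 prog_ids[16], attach_flags = 0;
	__u32 prog_cnt = 16;	/* in: room for 16 ids, out: ids returned */
	__u32 i;
	int cgrp = open("/sys/fs/cgroup/parent", O_RDONLY);

	if (cgrp < 0)
		return;

	if (!bpf_prog_query(cgrp, BPF_CGROUP_INET_SOCK_CREATE,
			    BPF_F_QUERY_EFFECTIVE, &attach_flags,
			    prog_ids, &prog_cnt)) {
		printf("attach_flags %#x, %u prog(s):\n",
		       attach_flags, prog_cnt);
		for (i = 0; i < prog_cnt; i++)
			printf("  prog id %u\n", prog_ids[i]);
	} else {
		printf("BPF_PROG_QUERY failed: errno %d\n", errno);
	}

	close(cgrp);
}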
Miller --- tools/Makefile | 14 +- tools/bpf/Makefile | 35 ++ tools/bpf/bpf_asm.c | 52 ++ tools/bpf/bpf_dbg.c | 1395 ++++++++++++++++++++++++++++++++++++++++++++ tools/bpf/bpf_exp.l | 198 +++++++ tools/bpf/bpf_exp.y | 656 +++++++++++++++++++++ tools/bpf/bpf_jit_disasm.c | 320 ++++++++++ tools/net/Makefile | 35 -- tools/net/bpf_asm.c | 52 -- tools/net/bpf_dbg.c | 1395 -------------------------------------------- tools/net/bpf_exp.l | 198 ------- tools/net/bpf_exp.y | 656 --------------------- tools/net/bpf_jit_disasm.c | 320 ---------- 13 files changed, 2663 insertions(+), 2663 deletions(-) create mode 100644 tools/bpf/Makefile create mode 100644 tools/bpf/bpf_asm.c create mode 100644 tools/bpf/bpf_dbg.c create mode 100644 tools/bpf/bpf_exp.l create mode 100644 tools/bpf/bpf_exp.y create mode 100644 tools/bpf/bpf_jit_disasm.c delete mode 100644 tools/net/Makefile delete mode 100644 tools/net/bpf_asm.c delete mode 100644 tools/net/bpf_dbg.c delete mode 100644 tools/net/bpf_exp.l delete mode 100644 tools/net/bpf_exp.y delete mode 100644 tools/net/bpf_jit_disasm.c (limited to 'tools') diff --git a/tools/Makefile b/tools/Makefile index 9dfede37c8ff..df6fcb293fbc 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -19,7 +19,7 @@ help: @echo ' kvm_stat - top-like utility for displaying kvm statistics' @echo ' leds - LEDs tools' @echo ' liblockdep - user-space wrapper for kernel locking-validator' - @echo ' net - misc networking tools' + @echo ' bpf - misc BPF tools' @echo ' perf - Linux performance measurement and analysis tool' @echo ' selftests - various kernel selftests' @echo ' spi - spi tools' @@ -57,7 +57,7 @@ acpi: FORCE cpupower: FORCE $(call descend,power/$@) -cgroup firewire hv guest spi usb virtio vm net iio gpio objtool leds: FORCE +cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds: FORCE $(call descend,$@) liblockdep: FORCE @@ -91,7 +91,7 @@ kvm_stat: FORCE all: acpi cgroup cpupower gpio hv firewire liblockdep \ perf selftests spi turbostat usb \ - virtio vm net x86_energy_perf_policy \ + virtio vm bpf x86_energy_perf_policy \ tmon freefall iio objtool kvm_stat acpi_install: @@ -100,7 +100,7 @@ acpi_install: cpupower_install: $(call descend,power/$(@:_install=),install) -cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install net_install objtool_install: +cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install: $(call descend,$(@:_install=),install) liblockdep_install: @@ -124,7 +124,7 @@ kvm_stat_install: install: acpi_install cgroup_install cpupower_install gpio_install \ hv_install firewire_install iio_install liblockdep_install \ perf_install selftests_install turbostat_install usb_install \ - virtio_install vm_install net_install x86_energy_perf_policy_install \ + virtio_install vm_install bpf_install x86_energy_perf_policy_install \ tmon_install freefall_install objtool_install kvm_stat_install acpi_clean: @@ -133,7 +133,7 @@ acpi_clean: cpupower_clean: $(call descend,power/cpupower,clean) -cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean net_clean iio_clean gpio_clean objtool_clean leds_clean: +cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean: $(call descend,$(@:_clean=),clean) liblockdep_clean: @@ -169,7 +169,7 @@ build_clean: clean: acpi_clean cgroup_clean 
cpupower_clean hv_clean firewire_clean \ perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \ - vm_clean net_clean iio_clean x86_energy_perf_policy_clean tmon_clean \ + vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \ freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \ gpio_clean objtool_clean leds_clean diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile new file mode 100644 index 000000000000..ddf888010652 --- /dev/null +++ b/tools/bpf/Makefile @@ -0,0 +1,35 @@ +prefix = /usr + +CC = gcc +LEX = flex +YACC = bison + +CFLAGS += -Wall -O2 +CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include + +%.yacc.c: %.y + $(YACC) -o $@ -d $< + +%.lex.c: %.l + $(LEX) -o $@ $< + +all : bpf_jit_disasm bpf_dbg bpf_asm + +bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm' +bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl +bpf_jit_disasm : bpf_jit_disasm.o + +bpf_dbg : LDLIBS = -lreadline +bpf_dbg : bpf_dbg.o + +bpf_asm : LDLIBS = +bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o +bpf_exp.lex.o : bpf_exp.yacc.c + +clean : + rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.* + +install : + install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm + install bpf_dbg $(prefix)/bin/bpf_dbg + install bpf_asm $(prefix)/bin/bpf_asm diff --git a/tools/bpf/bpf_asm.c b/tools/bpf/bpf_asm.c new file mode 100644 index 000000000000..c15aef097b04 --- /dev/null +++ b/tools/bpf/bpf_asm.c @@ -0,0 +1,52 @@ +/* + * Minimal BPF assembler + * + * Instead of libpcap high-level filter expressions, it can be quite + * useful to define filters in low-level BPF assembler (that is kept + * close to Steven McCanne and Van Jacobson's original BPF paper). + * In particular for BPF JIT implementors, JIT security auditors, or + * just for defining BPF expressions that contain extensions which are + * not supported by compilers. + * + * How to get into it: + * + * 1) read Documentation/networking/filter.txt + * 2) Run `bpf_asm [-c] ` to translate into binary + * blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will + * pretty print a C-like construct. + * + * Copyright 2013 Daniel Borkmann + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include +#include +#include + +extern void bpf_asm_compile(FILE *fp, bool cstyle); + +int main(int argc, char **argv) +{ + FILE *fp = stdin; + bool cstyle = false; + int i; + + for (i = 1; i < argc; i++) { + if (!strncmp("-c", argv[i], 2)) { + cstyle = true; + continue; + } + + fp = fopen(argv[i], "r"); + if (!fp) { + fp = stdin; + continue; + } + + break; + } + + bpf_asm_compile(fp, cstyle); + + return 0; +} diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c new file mode 100644 index 000000000000..4f254bcc4423 --- /dev/null +++ b/tools/bpf/bpf_dbg.c @@ -0,0 +1,1395 @@ +/* + * Minimal BPF debugger + * + * Minimal BPF debugger that mimics the kernel's engine (w/o extensions) + * and allows for single stepping through selected packets from a pcap + * with a provided user filter in order to facilitate verification of a + * BPF program. Besides others, this is useful to verify BPF programs + * before attaching to a live system, and can be used in socket filters, + * cls_bpf, xt_bpf, team driver and e.g. PTP code; in particular when a + * single more complex BPF program is being used. 
Reasons for a more + * complex BPF program are likely primarily to optimize execution time + * for making a verdict when multiple simple BPF programs are combined + * into one in order to prevent parsing same headers multiple times. + * + * More on how to debug BPF opcodes see Documentation/networking/filter.txt + * which is the main document on BPF. Mini howto for getting started: + * + * 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'): + * 2) > load bpf 6,40 0 0 12,21 0 3 20... (output from `bpf_asm` or + * `tcpdump -iem1 -ddd port 22 | tr '\n' ','` to load as filter) + * 3) > load pcap foo.pcap + * 4) > run /disassemble/dump/quit (self-explanatory) + * 5) > breakpoint 2 (sets bp at loaded BPF insns 2, do `run` then; + * multiple bps can be set, of course, a call to `breakpoint` + * w/o args shows currently loaded bps, `breakpoint reset` for + * resetting all breakpoints) + * 6) > select 3 (`run` etc will start from the 3rd packet in the pcap) + * 7) > step [-, +] (performs single stepping through the BPF) + * + * Copyright 2013 Daniel Borkmann + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TCPDUMP_MAGIC 0xa1b2c3d4 + +#define BPF_LDX_B (BPF_LDX | BPF_B) +#define BPF_LDX_W (BPF_LDX | BPF_W) +#define BPF_JMP_JA (BPF_JMP | BPF_JA) +#define BPF_JMP_JEQ (BPF_JMP | BPF_JEQ) +#define BPF_JMP_JGT (BPF_JMP | BPF_JGT) +#define BPF_JMP_JGE (BPF_JMP | BPF_JGE) +#define BPF_JMP_JSET (BPF_JMP | BPF_JSET) +#define BPF_ALU_ADD (BPF_ALU | BPF_ADD) +#define BPF_ALU_SUB (BPF_ALU | BPF_SUB) +#define BPF_ALU_MUL (BPF_ALU | BPF_MUL) +#define BPF_ALU_DIV (BPF_ALU | BPF_DIV) +#define BPF_ALU_MOD (BPF_ALU | BPF_MOD) +#define BPF_ALU_NEG (BPF_ALU | BPF_NEG) +#define BPF_ALU_AND (BPF_ALU | BPF_AND) +#define BPF_ALU_OR (BPF_ALU | BPF_OR) +#define BPF_ALU_XOR (BPF_ALU | BPF_XOR) +#define BPF_ALU_LSH (BPF_ALU | BPF_LSH) +#define BPF_ALU_RSH (BPF_ALU | BPF_RSH) +#define BPF_MISC_TAX (BPF_MISC | BPF_TAX) +#define BPF_MISC_TXA (BPF_MISC | BPF_TXA) +#define BPF_LD_B (BPF_LD | BPF_B) +#define BPF_LD_H (BPF_LD | BPF_H) +#define BPF_LD_W (BPF_LD | BPF_W) + +#ifndef array_size +# define array_size(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef __check_format_printf +# define __check_format_printf(pos_fmtstr, pos_fmtargs) \ + __attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs)))) +#endif + +enum { + CMD_OK, + CMD_ERR, + CMD_EX, +}; + +struct shell_cmd { + const char *name; + int (*func)(char *args); +}; + +struct pcap_filehdr { + uint32_t magic; + uint16_t version_major; + uint16_t version_minor; + int32_t thiszone; + uint32_t sigfigs; + uint32_t snaplen; + uint32_t linktype; +}; + +struct pcap_timeval { + int32_t tv_sec; + int32_t tv_usec; +}; + +struct pcap_pkthdr { + struct pcap_timeval ts; + uint32_t caplen; + uint32_t len; +}; + +struct bpf_regs { + uint32_t A; + uint32_t X; + uint32_t M[BPF_MEMWORDS]; + uint32_t R; + bool Rs; + uint16_t Pc; +}; + +static struct sock_filter bpf_image[BPF_MAXINSNS + 1]; +static unsigned int bpf_prog_len; + +static int bpf_breakpoints[64]; +static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1]; +static struct bpf_regs bpf_curr; +static unsigned int bpf_regs_len; + +static int pcap_fd = -1; +static unsigned int pcap_packet; +static size_t pcap_map_size; +static char *pcap_ptr_va_start, *pcap_ptr_va_curr; + +static const char * 
const op_table[] = { + [BPF_ST] = "st", + [BPF_STX] = "stx", + [BPF_LD_B] = "ldb", + [BPF_LD_H] = "ldh", + [BPF_LD_W] = "ld", + [BPF_LDX] = "ldx", + [BPF_LDX_B] = "ldxb", + [BPF_JMP_JA] = "ja", + [BPF_JMP_JEQ] = "jeq", + [BPF_JMP_JGT] = "jgt", + [BPF_JMP_JGE] = "jge", + [BPF_JMP_JSET] = "jset", + [BPF_ALU_ADD] = "add", + [BPF_ALU_SUB] = "sub", + [BPF_ALU_MUL] = "mul", + [BPF_ALU_DIV] = "div", + [BPF_ALU_MOD] = "mod", + [BPF_ALU_NEG] = "neg", + [BPF_ALU_AND] = "and", + [BPF_ALU_OR] = "or", + [BPF_ALU_XOR] = "xor", + [BPF_ALU_LSH] = "lsh", + [BPF_ALU_RSH] = "rsh", + [BPF_MISC_TAX] = "tax", + [BPF_MISC_TXA] = "txa", + [BPF_RET] = "ret", +}; + +static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...) +{ + int ret; + va_list vl; + + va_start(vl, fmt); + ret = vfprintf(rl_outstream, fmt, vl); + va_end(vl); + + return ret; +} + +static int matches(const char *cmd, const char *pattern) +{ + int len = strlen(cmd); + + if (len > strlen(pattern)) + return -1; + + return memcmp(pattern, cmd, len); +} + +static void hex_dump(const uint8_t *buf, size_t len) +{ + int i; + + rl_printf("%3u: ", 0); + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + rl_printf("\n%3u: ", i); + rl_printf("%02x ", buf[i]); + } + rl_printf("\n"); +} + +static bool bpf_prog_loaded(void) +{ + if (bpf_prog_len == 0) + rl_printf("no bpf program loaded!\n"); + + return bpf_prog_len > 0; +} + +static void bpf_disasm(const struct sock_filter f, unsigned int i) +{ + const char *op, *fmt; + int val = f.k; + char buf[256]; + + switch (f.code) { + case BPF_RET | BPF_K: + op = op_table[BPF_RET]; + fmt = "#%#x"; + break; + case BPF_RET | BPF_A: + op = op_table[BPF_RET]; + fmt = "a"; + break; + case BPF_RET | BPF_X: + op = op_table[BPF_RET]; + fmt = "x"; + break; + case BPF_MISC_TAX: + op = op_table[BPF_MISC_TAX]; + fmt = ""; + break; + case BPF_MISC_TXA: + op = op_table[BPF_MISC_TXA]; + fmt = ""; + break; + case BPF_ST: + op = op_table[BPF_ST]; + fmt = "M[%d]"; + break; + case BPF_STX: + op = op_table[BPF_STX]; + fmt = "M[%d]"; + break; + case BPF_LD_W | BPF_ABS: + op = op_table[BPF_LD_W]; + fmt = "[%d]"; + break; + case BPF_LD_H | BPF_ABS: + op = op_table[BPF_LD_H]; + fmt = "[%d]"; + break; + case BPF_LD_B | BPF_ABS: + op = op_table[BPF_LD_B]; + fmt = "[%d]"; + break; + case BPF_LD_W | BPF_LEN: + op = op_table[BPF_LD_W]; + fmt = "#len"; + break; + case BPF_LD_W | BPF_IND: + op = op_table[BPF_LD_W]; + fmt = "[x+%d]"; + break; + case BPF_LD_H | BPF_IND: + op = op_table[BPF_LD_H]; + fmt = "[x+%d]"; + break; + case BPF_LD_B | BPF_IND: + op = op_table[BPF_LD_B]; + fmt = "[x+%d]"; + break; + case BPF_LD | BPF_IMM: + op = op_table[BPF_LD_W]; + fmt = "#%#x"; + break; + case BPF_LDX | BPF_IMM: + op = op_table[BPF_LDX]; + fmt = "#%#x"; + break; + case BPF_LDX_B | BPF_MSH: + op = op_table[BPF_LDX_B]; + fmt = "4*([%d]&0xf)"; + break; + case BPF_LD | BPF_MEM: + op = op_table[BPF_LD_W]; + fmt = "M[%d]"; + break; + case BPF_LDX | BPF_MEM: + op = op_table[BPF_LDX]; + fmt = "M[%d]"; + break; + case BPF_JMP_JA: + op = op_table[BPF_JMP_JA]; + fmt = "%d"; + val = i + 1 + f.k; + break; + case BPF_JMP_JGT | BPF_X: + op = op_table[BPF_JMP_JGT]; + fmt = "x"; + break; + case BPF_JMP_JGT | BPF_K: + op = op_table[BPF_JMP_JGT]; + fmt = "#%#x"; + break; + case BPF_JMP_JGE | BPF_X: + op = op_table[BPF_JMP_JGE]; + fmt = "x"; + break; + case BPF_JMP_JGE | BPF_K: + op = op_table[BPF_JMP_JGE]; + fmt = "#%#x"; + break; + case BPF_JMP_JEQ | BPF_X: + op = op_table[BPF_JMP_JEQ]; + fmt = "x"; + break; + case BPF_JMP_JEQ | BPF_K: + op = op_table[BPF_JMP_JEQ]; + 
fmt = "#%#x"; + break; + case BPF_JMP_JSET | BPF_X: + op = op_table[BPF_JMP_JSET]; + fmt = "x"; + break; + case BPF_JMP_JSET | BPF_K: + op = op_table[BPF_JMP_JSET]; + fmt = "#%#x"; + break; + case BPF_ALU_NEG: + op = op_table[BPF_ALU_NEG]; + fmt = ""; + break; + case BPF_ALU_LSH | BPF_X: + op = op_table[BPF_ALU_LSH]; + fmt = "x"; + break; + case BPF_ALU_LSH | BPF_K: + op = op_table[BPF_ALU_LSH]; + fmt = "#%d"; + break; + case BPF_ALU_RSH | BPF_X: + op = op_table[BPF_ALU_RSH]; + fmt = "x"; + break; + case BPF_ALU_RSH | BPF_K: + op = op_table[BPF_ALU_RSH]; + fmt = "#%d"; + break; + case BPF_ALU_ADD | BPF_X: + op = op_table[BPF_ALU_ADD]; + fmt = "x"; + break; + case BPF_ALU_ADD | BPF_K: + op = op_table[BPF_ALU_ADD]; + fmt = "#%d"; + break; + case BPF_ALU_SUB | BPF_X: + op = op_table[BPF_ALU_SUB]; + fmt = "x"; + break; + case BPF_ALU_SUB | BPF_K: + op = op_table[BPF_ALU_SUB]; + fmt = "#%d"; + break; + case BPF_ALU_MUL | BPF_X: + op = op_table[BPF_ALU_MUL]; + fmt = "x"; + break; + case BPF_ALU_MUL | BPF_K: + op = op_table[BPF_ALU_MUL]; + fmt = "#%d"; + break; + case BPF_ALU_DIV | BPF_X: + op = op_table[BPF_ALU_DIV]; + fmt = "x"; + break; + case BPF_ALU_DIV | BPF_K: + op = op_table[BPF_ALU_DIV]; + fmt = "#%d"; + break; + case BPF_ALU_MOD | BPF_X: + op = op_table[BPF_ALU_MOD]; + fmt = "x"; + break; + case BPF_ALU_MOD | BPF_K: + op = op_table[BPF_ALU_MOD]; + fmt = "#%d"; + break; + case BPF_ALU_AND | BPF_X: + op = op_table[BPF_ALU_AND]; + fmt = "x"; + break; + case BPF_ALU_AND | BPF_K: + op = op_table[BPF_ALU_AND]; + fmt = "#%#x"; + break; + case BPF_ALU_OR | BPF_X: + op = op_table[BPF_ALU_OR]; + fmt = "x"; + break; + case BPF_ALU_OR | BPF_K: + op = op_table[BPF_ALU_OR]; + fmt = "#%#x"; + break; + case BPF_ALU_XOR | BPF_X: + op = op_table[BPF_ALU_XOR]; + fmt = "x"; + break; + case BPF_ALU_XOR | BPF_K: + op = op_table[BPF_ALU_XOR]; + fmt = "#%#x"; + break; + default: + op = "nosup"; + fmt = "%#x"; + val = f.code; + break; + } + + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), fmt, val); + buf[sizeof(buf) - 1] = 0; + + if ((BPF_CLASS(f.code) == BPF_JMP && BPF_OP(f.code) != BPF_JA)) + rl_printf("l%d:\t%s %s, l%d, l%d\n", i, op, buf, + i + 1 + f.jt, i + 1 + f.jf); + else + rl_printf("l%d:\t%s %s\n", i, op, buf); +} + +static void bpf_dump_curr(struct bpf_regs *r, struct sock_filter *f) +{ + int i, m = 0; + + rl_printf("pc: [%u]\n", r->Pc); + rl_printf("code: [%u] jt[%u] jf[%u] k[%u]\n", + f->code, f->jt, f->jf, f->k); + rl_printf("curr: "); + bpf_disasm(*f, r->Pc); + + if (f->jt || f->jf) { + rl_printf("jt: "); + bpf_disasm(*(f + f->jt + 1), r->Pc + f->jt + 1); + rl_printf("jf: "); + bpf_disasm(*(f + f->jf + 1), r->Pc + f->jf + 1); + } + + rl_printf("A: [%#08x][%u]\n", r->A, r->A); + rl_printf("X: [%#08x][%u]\n", r->X, r->X); + if (r->Rs) + rl_printf("ret: [%#08x][%u]!\n", r->R, r->R); + + for (i = 0; i < BPF_MEMWORDS; i++) { + if (r->M[i]) { + m++; + rl_printf("M[%d]: [%#08x][%u]\n", i, r->M[i], r->M[i]); + } + } + if (m == 0) + rl_printf("M[0,%d]: [%#08x][%u]\n", BPF_MEMWORDS - 1, 0, 0); +} + +static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len) +{ + if (pkt_caplen != pkt_len) + rl_printf("cap: %u, len: %u\n", pkt_caplen, pkt_len); + else + rl_printf("len: %u\n", pkt_len); + + hex_dump(pkt, pkt_caplen); +} + +static void bpf_disasm_all(const struct sock_filter *f, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + bpf_disasm(f[i], i); +} + +static void bpf_dump_all(const struct sock_filter *f, unsigned int len) +{ + unsigned int i; + + 
rl_printf("/* { op, jt, jf, k }, */\n"); + for (i = 0; i < len; i++) + rl_printf("{ %#04x, %2u, %2u, %#010x },\n", + f[i].code, f[i].jt, f[i].jf, f[i].k); +} + +static bool bpf_runnable(struct sock_filter *f, unsigned int len) +{ + int sock, ret, i; + struct sock_fprog bpf = { + .filter = f, + .len = len, + }; + + sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) { + rl_printf("cannot open socket!\n"); + return false; + } + ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)); + close(sock); + if (ret < 0) { + rl_printf("program not allowed to run by kernel!\n"); + return false; + } + for (i = 0; i < len; i++) { + if (BPF_CLASS(f[i].code) == BPF_LD && + f[i].k > SKF_AD_OFF) { + rl_printf("extensions currently not supported!\n"); + return false; + } + } + + return true; +} + +static void bpf_reset_breakpoints(void) +{ + int i; + + for (i = 0; i < array_size(bpf_breakpoints); i++) + bpf_breakpoints[i] = -1; +} + +static void bpf_set_breakpoints(unsigned int where) +{ + int i; + bool set = false; + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] == (int) where) { + rl_printf("breakpoint already set!\n"); + set = true; + break; + } + + if (bpf_breakpoints[i] == -1 && set == false) { + bpf_breakpoints[i] = where; + set = true; + } + } + + if (!set) + rl_printf("too many breakpoints set, reset first!\n"); +} + +static void bpf_dump_breakpoints(void) +{ + int i; + + rl_printf("breakpoints: "); + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] < 0) + continue; + rl_printf("%d ", bpf_breakpoints[i]); + } + + rl_printf("\n"); +} + +static void bpf_reset(void) +{ + bpf_regs_len = 0; + + memset(bpf_regs, 0, sizeof(bpf_regs)); + memset(&bpf_curr, 0, sizeof(bpf_curr)); +} + +static void bpf_safe_regs(void) +{ + memcpy(&bpf_regs[bpf_regs_len++], &bpf_curr, sizeof(bpf_curr)); +} + +static bool bpf_restore_regs(int off) +{ + unsigned int index = bpf_regs_len - 1 + off; + + if (index == 0) { + bpf_reset(); + return true; + } else if (index < bpf_regs_len) { + memcpy(&bpf_curr, &bpf_regs[index], sizeof(bpf_curr)); + bpf_regs_len = index; + return true; + } else { + rl_printf("reached bottom of register history stack!\n"); + return false; + } +} + +static uint32_t extract_u32(uint8_t *pkt, uint32_t off) +{ + uint32_t r; + + memcpy(&r, &pkt[off], sizeof(r)); + + return ntohl(r); +} + +static uint16_t extract_u16(uint8_t *pkt, uint32_t off) +{ + uint16_t r; + + memcpy(&r, &pkt[off], sizeof(r)); + + return ntohs(r); +} + +static uint8_t extract_u8(uint8_t *pkt, uint32_t off) +{ + return pkt[off]; +} + +static void set_return(struct bpf_regs *r) +{ + r->R = 0; + r->Rs = true; +} + +static void bpf_single_step(struct bpf_regs *r, struct sock_filter *f, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len) +{ + uint32_t K = f->k; + int d; + + switch (f->code) { + case BPF_RET | BPF_K: + r->R = K; + r->Rs = true; + break; + case BPF_RET | BPF_A: + r->R = r->A; + r->Rs = true; + break; + case BPF_RET | BPF_X: + r->R = r->X; + r->Rs = true; + break; + case BPF_MISC_TAX: + r->X = r->A; + break; + case BPF_MISC_TXA: + r->A = r->X; + break; + case BPF_ST: + r->M[K] = r->A; + break; + case BPF_STX: + r->M[K] = r->X; + break; + case BPF_LD_W | BPF_ABS: + d = pkt_caplen - K; + if (d >= sizeof(uint32_t)) + r->A = extract_u32(pkt, K); + else + set_return(r); + break; + case BPF_LD_H | BPF_ABS: + d = pkt_caplen - K; + if (d >= sizeof(uint16_t)) + r->A = extract_u16(pkt, K); + else + set_return(r); + break; + case BPF_LD_B | BPF_ABS: + d = 
pkt_caplen - K; + if (d >= sizeof(uint8_t)) + r->A = extract_u8(pkt, K); + else + set_return(r); + break; + case BPF_LD_W | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d >= sizeof(uint32_t)) + r->A = extract_u32(pkt, r->X + K); + break; + case BPF_LD_H | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d >= sizeof(uint16_t)) + r->A = extract_u16(pkt, r->X + K); + else + set_return(r); + break; + case BPF_LD_B | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d >= sizeof(uint8_t)) + r->A = extract_u8(pkt, r->X + K); + else + set_return(r); + break; + case BPF_LDX_B | BPF_MSH: + d = pkt_caplen - K; + if (d >= sizeof(uint8_t)) { + r->X = extract_u8(pkt, K); + r->X = (r->X & 0xf) << 2; + } else + set_return(r); + break; + case BPF_LD_W | BPF_LEN: + r->A = pkt_len; + break; + case BPF_LDX_W | BPF_LEN: + r->A = pkt_len; + break; + case BPF_LD | BPF_IMM: + r->A = K; + break; + case BPF_LDX | BPF_IMM: + r->X = K; + break; + case BPF_LD | BPF_MEM: + r->A = r->M[K]; + break; + case BPF_LDX | BPF_MEM: + r->X = r->M[K]; + break; + case BPF_JMP_JA: + r->Pc += K; + break; + case BPF_JMP_JGT | BPF_X: + r->Pc += r->A > r->X ? f->jt : f->jf; + break; + case BPF_JMP_JGT | BPF_K: + r->Pc += r->A > K ? f->jt : f->jf; + break; + case BPF_JMP_JGE | BPF_X: + r->Pc += r->A >= r->X ? f->jt : f->jf; + break; + case BPF_JMP_JGE | BPF_K: + r->Pc += r->A >= K ? f->jt : f->jf; + break; + case BPF_JMP_JEQ | BPF_X: + r->Pc += r->A == r->X ? f->jt : f->jf; + break; + case BPF_JMP_JEQ | BPF_K: + r->Pc += r->A == K ? f->jt : f->jf; + break; + case BPF_JMP_JSET | BPF_X: + r->Pc += r->A & r->X ? f->jt : f->jf; + break; + case BPF_JMP_JSET | BPF_K: + r->Pc += r->A & K ? f->jt : f->jf; + break; + case BPF_ALU_NEG: + r->A = -r->A; + break; + case BPF_ALU_LSH | BPF_X: + r->A <<= r->X; + break; + case BPF_ALU_LSH | BPF_K: + r->A <<= K; + break; + case BPF_ALU_RSH | BPF_X: + r->A >>= r->X; + break; + case BPF_ALU_RSH | BPF_K: + r->A >>= K; + break; + case BPF_ALU_ADD | BPF_X: + r->A += r->X; + break; + case BPF_ALU_ADD | BPF_K: + r->A += K; + break; + case BPF_ALU_SUB | BPF_X: + r->A -= r->X; + break; + case BPF_ALU_SUB | BPF_K: + r->A -= K; + break; + case BPF_ALU_MUL | BPF_X: + r->A *= r->X; + break; + case BPF_ALU_MUL | BPF_K: + r->A *= K; + break; + case BPF_ALU_DIV | BPF_X: + case BPF_ALU_MOD | BPF_X: + if (r->X == 0) { + set_return(r); + break; + } + goto do_div; + case BPF_ALU_DIV | BPF_K: + case BPF_ALU_MOD | BPF_K: + if (K == 0) { + set_return(r); + break; + } +do_div: + switch (f->code) { + case BPF_ALU_DIV | BPF_X: + r->A /= r->X; + break; + case BPF_ALU_DIV | BPF_K: + r->A /= K; + break; + case BPF_ALU_MOD | BPF_X: + r->A %= r->X; + break; + case BPF_ALU_MOD | BPF_K: + r->A %= K; + break; + } + break; + case BPF_ALU_AND | BPF_X: + r->A &= r->X; + break; + case BPF_ALU_AND | BPF_K: + r->A &= K; + break; + case BPF_ALU_OR | BPF_X: + r->A |= r->X; + break; + case BPF_ALU_OR | BPF_K: + r->A |= K; + break; + case BPF_ALU_XOR | BPF_X: + r->A ^= r->X; + break; + case BPF_ALU_XOR | BPF_K: + r->A ^= K; + break; + } +} + +static bool bpf_pc_has_breakpoint(uint16_t pc) +{ + int i; + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] < 0) + continue; + if (bpf_breakpoints[i] == pc) + return true; + } + + return false; +} + +static bool bpf_handle_breakpoint(struct bpf_regs *r, struct sock_filter *f, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len) +{ + rl_printf("-- register dump --\n"); + bpf_dump_curr(r, &f[r->Pc]); + rl_printf("-- packet dump --\n"); + bpf_dump_pkt(pkt, pkt_caplen, pkt_len); + 
rl_printf("(breakpoint)\n"); + return true; +} + +static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt, + uint32_t pkt_caplen, uint32_t pkt_len) +{ + bool stop = false; + + while (bpf_curr.Rs == false && stop == false) { + bpf_safe_regs(); + + if (bpf_pc_has_breakpoint(bpf_curr.Pc)) + stop = bpf_handle_breakpoint(&bpf_curr, f, pkt, + pkt_caplen, pkt_len); + + bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen, + pkt_len); + bpf_curr.Pc++; + } + + return stop ? -1 : bpf_curr.R; +} + +static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len, int next) +{ + bool stop = false; + int i = 1; + + while (bpf_curr.Rs == false && stop == false) { + bpf_safe_regs(); + + if (i++ == next) + stop = bpf_handle_breakpoint(&bpf_curr, f, pkt, + pkt_caplen, pkt_len); + + bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen, + pkt_len); + bpf_curr.Pc++; + } + + return stop ? -1 : bpf_curr.R; +} + +static bool pcap_loaded(void) +{ + if (pcap_fd < 0) + rl_printf("no pcap file loaded!\n"); + + return pcap_fd >= 0; +} + +static struct pcap_pkthdr *pcap_curr_pkt(void) +{ + return (void *) pcap_ptr_va_curr; +} + +static bool pcap_next_pkt(void) +{ + struct pcap_pkthdr *hdr = pcap_curr_pkt(); + + if (pcap_ptr_va_curr + sizeof(*hdr) - + pcap_ptr_va_start >= pcap_map_size) + return false; + if (hdr->caplen == 0 || hdr->len == 0 || hdr->caplen > hdr->len) + return false; + if (pcap_ptr_va_curr + sizeof(*hdr) + hdr->caplen - + pcap_ptr_va_start >= pcap_map_size) + return false; + + pcap_ptr_va_curr += (sizeof(*hdr) + hdr->caplen); + return true; +} + +static void pcap_reset_pkt(void) +{ + pcap_ptr_va_curr = pcap_ptr_va_start + sizeof(struct pcap_filehdr); +} + +static int try_load_pcap(const char *file) +{ + struct pcap_filehdr *hdr; + struct stat sb; + int ret; + + pcap_fd = open(file, O_RDONLY); + if (pcap_fd < 0) { + rl_printf("cannot open pcap [%s]!\n", strerror(errno)); + return CMD_ERR; + } + + ret = fstat(pcap_fd, &sb); + if (ret < 0) { + rl_printf("cannot fstat pcap file!\n"); + return CMD_ERR; + } + + if (!S_ISREG(sb.st_mode)) { + rl_printf("not a regular pcap file, duh!\n"); + return CMD_ERR; + } + + pcap_map_size = sb.st_size; + if (pcap_map_size <= sizeof(struct pcap_filehdr)) { + rl_printf("pcap file too small!\n"); + return CMD_ERR; + } + + pcap_ptr_va_start = mmap(NULL, pcap_map_size, PROT_READ, + MAP_SHARED | MAP_LOCKED, pcap_fd, 0); + if (pcap_ptr_va_start == MAP_FAILED) { + rl_printf("mmap of file failed!"); + return CMD_ERR; + } + + hdr = (void *) pcap_ptr_va_start; + if (hdr->magic != TCPDUMP_MAGIC) { + rl_printf("wrong pcap magic!\n"); + return CMD_ERR; + } + + pcap_reset_pkt(); + + return CMD_OK; + +} + +static void try_close_pcap(void) +{ + if (pcap_fd >= 0) { + munmap(pcap_ptr_va_start, pcap_map_size); + close(pcap_fd); + + pcap_ptr_va_start = pcap_ptr_va_curr = NULL; + pcap_map_size = 0; + pcap_packet = 0; + pcap_fd = -1; + } +} + +static int cmd_load_bpf(char *bpf_string) +{ + char sp, *token, separator = ','; + unsigned short bpf_len, i = 0; + struct sock_filter tmp; + + bpf_prog_len = 0; + memset(bpf_image, 0, sizeof(bpf_image)); + + if (sscanf(bpf_string, "%hu%c", &bpf_len, &sp) != 2 || + sp != separator || bpf_len > BPF_MAXINSNS || bpf_len == 0) { + rl_printf("syntax error in head length encoding!\n"); + return CMD_ERR; + } + + token = bpf_string; + while ((token = strchr(token, separator)) && (++token)[0]) { + if (i >= bpf_len) { + rl_printf("program exceeds encoded length!\n"); + 
return CMD_ERR; + } + + if (sscanf(token, "%hu %hhu %hhu %u,", + &tmp.code, &tmp.jt, &tmp.jf, &tmp.k) != 4) { + rl_printf("syntax error at instruction %d!\n", i); + return CMD_ERR; + } + + bpf_image[i].code = tmp.code; + bpf_image[i].jt = tmp.jt; + bpf_image[i].jf = tmp.jf; + bpf_image[i].k = tmp.k; + + i++; + } + + if (i != bpf_len) { + rl_printf("syntax error exceeding encoded length!\n"); + return CMD_ERR; + } else + bpf_prog_len = bpf_len; + if (!bpf_runnable(bpf_image, bpf_prog_len)) + bpf_prog_len = 0; + + return CMD_OK; +} + +static int cmd_load_pcap(char *file) +{ + char *file_trim, *tmp; + + file_trim = strtok_r(file, " ", &tmp); + if (file_trim == NULL) + return CMD_ERR; + + try_close_pcap(); + + return try_load_pcap(file_trim); +} + +static int cmd_load(char *arg) +{ + char *subcmd, *cont, *tmp = strdup(arg); + int ret = CMD_OK; + + subcmd = strtok_r(tmp, " ", &cont); + if (subcmd == NULL) + goto out; + if (matches(subcmd, "bpf") == 0) { + bpf_reset(); + bpf_reset_breakpoints(); + + ret = cmd_load_bpf(cont); + } else if (matches(subcmd, "pcap") == 0) { + ret = cmd_load_pcap(cont); + } else { +out: + rl_printf("bpf : load bpf code\n"); + rl_printf("pcap : load pcap file\n"); + ret = CMD_ERR; + } + + free(tmp); + return ret; +} + +static int cmd_step(char *num) +{ + struct pcap_pkthdr *hdr; + int steps, ret; + + if (!bpf_prog_loaded() || !pcap_loaded()) + return CMD_ERR; + + steps = strtol(num, NULL, 10); + if (steps == 0 || strlen(num) == 0) + steps = 1; + if (steps < 0) { + if (!bpf_restore_regs(steps)) + return CMD_ERR; + steps = 1; + } + + hdr = pcap_curr_pkt(); + ret = bpf_run_stepping(bpf_image, bpf_prog_len, + (uint8_t *) hdr + sizeof(*hdr), + hdr->caplen, hdr->len, steps); + if (ret >= 0 || bpf_curr.Rs) { + bpf_reset(); + if (!pcap_next_pkt()) { + rl_printf("(going back to first packet)\n"); + pcap_reset_pkt(); + } else { + rl_printf("(next packet)\n"); + } + } + + return CMD_OK; +} + +static int cmd_select(char *num) +{ + unsigned int which, i; + bool have_next = true; + + if (!pcap_loaded() || strlen(num) == 0) + return CMD_ERR; + + which = strtoul(num, NULL, 10); + if (which == 0) { + rl_printf("packet count starts with 1, clamping!\n"); + which = 1; + } + + pcap_reset_pkt(); + bpf_reset(); + + for (i = 0; i < which && (have_next = pcap_next_pkt()); i++) + /* noop */; + if (!have_next || pcap_curr_pkt() == NULL) { + rl_printf("no packet #%u available!\n", which); + pcap_reset_pkt(); + return CMD_ERR; + } + + return CMD_OK; +} + +static int cmd_breakpoint(char *subcmd) +{ + if (!bpf_prog_loaded()) + return CMD_ERR; + if (strlen(subcmd) == 0) + bpf_dump_breakpoints(); + else if (matches(subcmd, "reset") == 0) + bpf_reset_breakpoints(); + else { + unsigned int where = strtoul(subcmd, NULL, 10); + + if (where < bpf_prog_len) { + bpf_set_breakpoints(where); + rl_printf("breakpoint at: "); + bpf_disasm(bpf_image[where], where); + } + } + + return CMD_OK; +} + +static int cmd_run(char *num) +{ + static uint32_t pass, fail; + bool has_limit = true; + int pkts = 0, i = 0; + + if (!bpf_prog_loaded() || !pcap_loaded()) + return CMD_ERR; + + pkts = strtol(num, NULL, 10); + if (pkts == 0 || strlen(num) == 0) + has_limit = false; + + do { + struct pcap_pkthdr *hdr = pcap_curr_pkt(); + int ret = bpf_run_all(bpf_image, bpf_prog_len, + (uint8_t *) hdr + sizeof(*hdr), + hdr->caplen, hdr->len); + if (ret > 0) + pass++; + else if (ret == 0) + fail++; + else + return CMD_OK; + bpf_reset(); + } while (pcap_next_pkt() && (!has_limit || (has_limit && ++i < pkts))); + + rl_printf("bpf passes:%u 
fails:%u\n", pass, fail); + + pcap_reset_pkt(); + bpf_reset(); + + pass = fail = 0; + return CMD_OK; +} + +static int cmd_disassemble(char *line_string) +{ + bool single_line = false; + unsigned long line; + + if (!bpf_prog_loaded()) + return CMD_ERR; + if (strlen(line_string) > 0 && + (line = strtoul(line_string, NULL, 10)) < bpf_prog_len) + single_line = true; + if (single_line) + bpf_disasm(bpf_image[line], line); + else + bpf_disasm_all(bpf_image, bpf_prog_len); + + return CMD_OK; +} + +static int cmd_dump(char *dontcare) +{ + if (!bpf_prog_loaded()) + return CMD_ERR; + + bpf_dump_all(bpf_image, bpf_prog_len); + + return CMD_OK; +} + +static int cmd_quit(char *dontcare) +{ + return CMD_EX; +} + +static const struct shell_cmd cmds[] = { + { .name = "load", .func = cmd_load }, + { .name = "select", .func = cmd_select }, + { .name = "step", .func = cmd_step }, + { .name = "run", .func = cmd_run }, + { .name = "breakpoint", .func = cmd_breakpoint }, + { .name = "disassemble", .func = cmd_disassemble }, + { .name = "dump", .func = cmd_dump }, + { .name = "quit", .func = cmd_quit }, +}; + +static int execf(char *arg) +{ + char *cmd, *cont, *tmp = strdup(arg); + int i, ret = 0, len; + + cmd = strtok_r(tmp, " ", &cont); + if (cmd == NULL) + goto out; + len = strlen(cmd); + for (i = 0; i < array_size(cmds); i++) { + if (len != strlen(cmds[i].name)) + continue; + if (strncmp(cmds[i].name, cmd, len) == 0) { + ret = cmds[i].func(cont); + break; + } + } +out: + free(tmp); + return ret; +} + +static char *shell_comp_gen(const char *buf, int state) +{ + static int list_index, len; + + if (!state) { + list_index = 0; + len = strlen(buf); + } + + for (; list_index < array_size(cmds); ) { + const char *name = cmds[list_index].name; + + list_index++; + if (strncmp(name, buf, len) == 0) + return strdup(name); + } + + return NULL; +} + +static char **shell_completion(const char *buf, int start, int end) +{ + char **matches = NULL; + + if (start == 0) + matches = rl_completion_matches(buf, shell_comp_gen); + + return matches; +} + +static void intr_shell(int sig) +{ + if (rl_end) + rl_kill_line(-1, 0); + + rl_crlf(); + rl_refresh_line(0, 0); + rl_free_line_state(); +} + +static void init_shell(FILE *fin, FILE *fout) +{ + char file[128]; + + snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME")); + read_history(file); + + rl_instream = fin; + rl_outstream = fout; + + rl_readline_name = "bpf_dbg"; + rl_terminal_name = getenv("TERM"); + + rl_catch_signals = 0; + rl_catch_sigwinch = 1; + + rl_attempted_completion_function = shell_completion; + + rl_bind_key('\t', rl_complete); + + rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap); + rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap); + + snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME")); + rl_read_init_file(file); + + rl_prep_terminal(0); + rl_set_signals(); + + signal(SIGINT, intr_shell); +} + +static void exit_shell(FILE *fin, FILE *fout) +{ + char file[128]; + + snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME")); + write_history(file); + + clear_history(); + rl_deprep_terminal(); + + try_close_pcap(); + + if (fin != stdin) + fclose(fin); + if (fout != stdout) + fclose(fout); +} + +static int run_shell_loop(FILE *fin, FILE *fout) +{ + char *buf; + + init_shell(fin, fout); + + while ((buf = readline("> ")) != NULL) { + int ret = execf(buf); + if (ret == CMD_EX) + break; + if (ret == CMD_OK && strlen(buf) > 0) + add_history(buf); + + free(buf); + } + + exit_shell(fin, fout); + return 0; +} + +int 
main(int argc, char **argv) +{ + FILE *fin = NULL, *fout = NULL; + + if (argc >= 2) + fin = fopen(argv[1], "r"); + if (argc >= 3) + fout = fopen(argv[2], "w"); + + return run_shell_loop(fin ? : stdin, fout ? : stdout); +} diff --git a/tools/bpf/bpf_exp.l b/tools/bpf/bpf_exp.l new file mode 100644 index 000000000000..bd83149e7be0 --- /dev/null +++ b/tools/bpf/bpf_exp.l @@ -0,0 +1,198 @@ +/* + * BPF asm code lexer + * + * This program is free software; you can distribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Syntax kept close to: + * + * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new + * architecture for user-level packet capture. In Proceedings of the + * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993 + * Conference Proceedings (USENIX'93). USENIX Association, Berkeley, + * CA, USA, 2-2. + * + * Copyright 2013 Daniel Borkmann + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +%{ + +#include +#include +#include +#include + +#include + +#include "bpf_exp.yacc.h" + +extern void yyerror(const char *str); + +%} + +%option align +%option ecs + +%option nounput +%option noreject +%option noinput +%option noyywrap + +%option 8bit +%option caseless +%option yylineno + +%% + +"ldb" { return OP_LDB; } +"ldh" { return OP_LDH; } +"ld" { return OP_LD; } +"ldi" { return OP_LDI; } +"ldx" { return OP_LDX; } +"ldxi" { return OP_LDXI; } +"ldxb" { return OP_LDXB; } +"st" { return OP_ST; } +"stx" { return OP_STX; } +"jmp" { return OP_JMP; } +"ja" { return OP_JMP; } +"jeq" { return OP_JEQ; } +"jneq" { return OP_JNEQ; } +"jne" { return OP_JNEQ; } +"jlt" { return OP_JLT; } +"jle" { return OP_JLE; } +"jgt" { return OP_JGT; } +"jge" { return OP_JGE; } +"jset" { return OP_JSET; } +"add" { return OP_ADD; } +"sub" { return OP_SUB; } +"mul" { return OP_MUL; } +"div" { return OP_DIV; } +"mod" { return OP_MOD; } +"neg" { return OP_NEG; } +"and" { return OP_AND; } +"xor" { return OP_XOR; } +"or" { return OP_OR; } +"lsh" { return OP_LSH; } +"rsh" { return OP_RSH; } +"ret" { return OP_RET; } +"tax" { return OP_TAX; } +"txa" { return OP_TXA; } + +"#"?("len") { return K_PKT_LEN; } + +"#"?("proto") { + yylval.number = SKF_AD_PROTOCOL; + return extension; + } +"#"?("type") { + yylval.number = SKF_AD_PKTTYPE; + return extension; + } +"#"?("poff") { + yylval.number = SKF_AD_PAY_OFFSET; + return extension; + } +"#"?("ifidx") { + yylval.number = SKF_AD_IFINDEX; + return extension; + } +"#"?("nla") { + yylval.number = SKF_AD_NLATTR; + return extension; + } +"#"?("nlan") { + yylval.number = SKF_AD_NLATTR_NEST; + return extension; + } +"#"?("mark") { + yylval.number = SKF_AD_MARK; + return extension; + } +"#"?("queue") { + yylval.number = SKF_AD_QUEUE; + return extension; + } +"#"?("hatype") { + yylval.number = SKF_AD_HATYPE; + return extension; + } +"#"?("rxhash") { + yylval.number = SKF_AD_RXHASH; + return extension; + } +"#"?("cpu") { + yylval.number = SKF_AD_CPU; + return extension; + } +"#"?("vlan_tci") { + yylval.number = SKF_AD_VLAN_TAG; + return extension; + } +"#"?("vlan_pr") { + yylval.number = SKF_AD_VLAN_TAG_PRESENT; + return extension; + } +"#"?("vlan_avail") { + yylval.number = SKF_AD_VLAN_TAG_PRESENT; + return extension; + } +"#"?("vlan_tpid") { + yylval.number = SKF_AD_VLAN_TPID; + return extension; + } +"#"?("rand") { + yylval.number = SKF_AD_RANDOM; + return extension; + } + +":" { 
return ':'; } +"," { return ','; } +"#" { return '#'; } +"%" { return '%'; } +"[" { return '['; } +"]" { return ']'; } +"(" { return '('; } +")" { return ')'; } +"x" { return 'x'; } +"a" { return 'a'; } +"+" { return '+'; } +"M" { return 'M'; } +"*" { return '*'; } +"&" { return '&'; } + +([0][x][a-fA-F0-9]+) { + yylval.number = strtoul(yytext, NULL, 16); + return number; + } +([0][b][0-1]+) { + yylval.number = strtol(yytext + 2, NULL, 2); + return number; + } +(([0])|([-+]?[1-9][0-9]*)) { + yylval.number = strtol(yytext, NULL, 10); + return number; + } +([0][0-9]+) { + yylval.number = strtol(yytext + 1, NULL, 8); + return number; + } +[a-zA-Z_][a-zA-Z0-9_]+ { + yylval.label = strdup(yytext); + return label; + } + +"/*"([^\*]|\*[^/])*"*/" { /* NOP */ } +";"[^\n]* { /* NOP */ } +^#.* { /* NOP */ } +[ \t]+ { /* NOP */ } +[ \n]+ { /* NOP */ } + +. { + printf("unknown character \'%s\'", yytext); + yyerror("lex unknown character"); + } + +%% diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y new file mode 100644 index 000000000000..56ba1de50784 --- /dev/null +++ b/tools/bpf/bpf_exp.y @@ -0,0 +1,656 @@ +/* + * BPF asm code parser + * + * This program is free software; you can distribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Syntax kept close to: + * + * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new + * architecture for user-level packet capture. In Proceedings of the + * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993 + * Conference Proceedings (USENIX'93). USENIX Association, Berkeley, + * CA, USA, 2-2. + * + * Copyright 2013 Daniel Borkmann + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +%{ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_exp.yacc.h" + +enum jmp_type { JTL, JFL, JKL }; + +extern FILE *yyin; +extern int yylineno; +extern int yylex(void); +extern void yyerror(const char *str); + +extern void bpf_asm_compile(FILE *fp, bool cstyle); +static void bpf_set_curr_instr(uint16_t op, uint8_t jt, uint8_t jf, uint32_t k); +static void bpf_set_curr_label(char *label); +static void bpf_set_jmp_label(char *label, enum jmp_type type); + +%} + +%union { + char *label; + uint32_t number; +} + +%token OP_LDB OP_LDH OP_LD OP_LDX OP_ST OP_STX OP_JMP OP_JEQ OP_JGT OP_JGE +%token OP_JSET OP_ADD OP_SUB OP_MUL OP_DIV OP_AND OP_OR OP_XOR OP_LSH OP_RSH +%token OP_RET OP_TAX OP_TXA OP_LDXB OP_MOD OP_NEG OP_JNEQ OP_JLT OP_JLE OP_LDI +%token OP_LDXI + +%token K_PKT_LEN + +%token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%' + +%token extension number label + +%type