Diffstat (limited to 'net/sched/em_meta.c')
| -rw-r--r-- | net/sched/em_meta.c | 300 |
1 file changed, 219 insertions(+), 81 deletions(-)
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 7c3de6ffa516..3f2e707a11d1 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/em_meta.c	Metadata ematch
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * Authors:	Thomas Graf <tgraf@suug.ch>
  *
  * ==========================================================================
@@ -48,7 +44,7 @@
  *	be provided for non-numeric types.
  *
  *	Additionally, type dependent modifiers such as shift operators
- *	or mask may be applied to extend the functionaliy. As of now,
+ *	or mask may be applied to extend the functionality. As of now,
  *	the variable length type supports shifting the byte string to
  *	the right, eating up any number of octets and thus supporting
  *	wildcard interface name comparisons such as "ppp%" matching
@@ -63,6 +59,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
@@ -176,11 +173,12 @@ META_COLLECTOR(int_vlan_tag)
 {
 	unsigned short tag;
 
-	tag = vlan_tx_tag_get(skb);
-	if (!tag && __vlan_get_tag(skb, &tag))
-		*err = -1;
-	else
+	if (skb_vlan_tag_present(skb))
+		dst->value = skb_vlan_tag_get(skb);
+	else if (!__vlan_get_tag(skb, &tag))
 		dst->value = tag;
+	else
+		*err = -1;
 }
 
 
@@ -197,7 +195,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
 	/* Let userspace take care of the byte ordering */
-	dst->value = skb->protocol;
+	dst->value = skb_protocol(skb, false);
 }
 
 META_COLLECTOR(int_pkttype)
@@ -222,7 +220,7 @@ META_COLLECTOR(int_maclen)
 
 META_COLLECTOR(int_rxhash)
 {
-	dst->value = skb_get_rxhash(skb);
+	dst->value = skb_get_hash(skb);
 }
 
 /**************************************************************************
@@ -271,42 +269,57 @@ META_COLLECTOR(int_rtiif)
  * Socket Attributes
  **************************************************************************/
 
-#define SKIP_NONLOCAL(skb) \
-	if (unlikely(skb->sk == NULL)) { \
-		*err = -1; \
-		return; \
-	}
+#define skip_nonlocal(skb) \
+	(unlikely(skb->sk == NULL))
 
 META_COLLECTOR(int_sk_family)
 {
-	SKIP_NONLOCAL(skb);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 	dst->value = skb->sk->sk_family;
 }
 
 META_COLLECTOR(int_sk_state)
 {
-	SKIP_NONLOCAL(skb);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 	dst->value = skb->sk->sk_state;
 }
 
 META_COLLECTOR(int_sk_reuse)
 {
-	SKIP_NONLOCAL(skb);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 	dst->value = skb->sk->sk_reuse;
 }
 
 META_COLLECTOR(int_sk_bound_if)
 {
-	SKIP_NONLOCAL(skb);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 	/* No error if bound_dev_if is 0, legal userspace check */
 	dst->value = skb->sk->sk_bound_dev_if;
 }
 
 META_COLLECTOR(var_sk_bound_if)
 {
-	SKIP_NONLOCAL(skb);
+	int bound_dev_if;
+
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 
-	if (skb->sk->sk_bound_dev_if == 0) {
+	bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
+	if (bound_dev_if == 0) {
 		dst->value = (unsigned long) "any";
 		dst->len = 3;
 	} else {
@@ -314,7 +327,7 @@ META_COLLECTOR(var_sk_bound_if)
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(sock_net(skb->sk),
-					   skb->sk->sk_bound_dev_if);
+					   bound_dev_if);
 		*err = var_dev(dev, dst);
 		rcu_read_unlock();
 	}
@@ -322,152 +335,273 @@ META_COLLECTOR(var_sk_bound_if)
 
 META_COLLECTOR(int_sk_refcnt)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = atomic_read(&skb->sk->sk_refcnt);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
+	dst->value = refcount_read(&skb->sk->sk_refcnt);
 }
 
 META_COLLECTOR(int_sk_rcvbuf)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_rcvbuf;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_rcvbuf;
 }
 
 META_COLLECTOR(int_sk_shutdown)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_shutdown;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_shutdown;
 }
 
 META_COLLECTOR(int_sk_proto)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_protocol;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_protocol;
 }
 
 META_COLLECTOR(int_sk_type)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_type;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_type;
 }
 
 META_COLLECTOR(int_sk_rmem_alloc)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = sk_rmem_alloc_get(skb->sk);
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk_rmem_alloc_get(sk);
 }
 
 META_COLLECTOR(int_sk_wmem_alloc)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = sk_wmem_alloc_get(skb->sk);
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk_wmem_alloc_get(sk);
 }
 
 META_COLLECTOR(int_sk_omem_alloc)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = atomic_read(&skb->sk->sk_omem_alloc);
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = atomic_read(&sk->sk_omem_alloc);
 }
 
 META_COLLECTOR(int_sk_rcv_qlen)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_receive_queue.qlen;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_receive_queue.qlen;
 }
 
 META_COLLECTOR(int_sk_snd_qlen)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_write_queue.qlen;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_write_queue.qlen;
 }
 
 META_COLLECTOR(int_sk_wmem_queued)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_wmem_queued;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_wmem_queued);
 }
 
 META_COLLECTOR(int_sk_fwd_alloc)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_forward_alloc;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_forward_alloc);
 }
 
 META_COLLECTOR(int_sk_sndbuf)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_sndbuf;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_sndbuf;
 }
 
 META_COLLECTOR(int_sk_alloc)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = (__force int) skb->sk->sk_allocation;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = (__force int) sk->sk_allocation;
 }
 
 META_COLLECTOR(int_sk_hash)
 {
-	SKIP_NONLOCAL(skb);
+	if (skip_nonlocal(skb)) {
+		*err = -1;
+		return;
+	}
 	dst->value = skb->sk->sk_hash;
 }
 
 META_COLLECTOR(int_sk_lingertime)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_lingertime / HZ;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
 }
 
 META_COLLECTOR(int_sk_err_qlen)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_error_queue.qlen;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_error_queue.qlen;
 }
 
 META_COLLECTOR(int_sk_ack_bl)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_ack_backlog;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_max_ack_bl)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_max_ack_backlog;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_prio)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_priority;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_priority);
 }
 
 META_COLLECTOR(int_sk_rcvlowat)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_rcvlowat;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_rcvlowat);
 }
 
 META_COLLECTOR(int_sk_rcvtimeo)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_rcvtimeo / HZ;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sndtimeo)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_sndtimeo / HZ;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sendmsg_off)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_frag.offset;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_frag.offset;
 }
 
 META_COLLECTOR(int_sk_write_pend)
 {
-	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_write_pending;
+	const struct sock *sk = skb_to_full_sk(skb);
+
+	if (!sk) {
+		*err = -1;
+		return;
+	}
+	dst->value = sk->sk_write_pending;
 }
 
 /**************************************************************************
@@ -663,7 +797,7 @@ struct meta_type_ops {
 	int	(*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
+static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 	[TCF_META_TYPE_VAR] = {
 		.destroy = meta_var_destroy,
 		.compare = meta_var_compare,
@@ -679,7 +813,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 	}
 };
 
-static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
+static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
 	return &__meta_type_ops[meta_type(v)];
 }
@@ -737,7 +871,7 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
 static void meta_delete(struct meta_match *meta)
 {
 	if (meta) {
-		struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
+		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
 
 		if (ops && ops->destroy) {
 			ops->destroy(&meta->lvalue);
@@ -769,7 +903,7 @@ static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
 	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
 };
 
-static int em_meta_change(struct tcf_proto *tp, void *data, int len,
+static int em_meta_change(struct net *net, void *data, int len,
 			  struct tcf_ematch *m)
 {
 	int err;
@@ -777,7 +911,8 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
 	struct tcf_meta_hdr *hdr;
 	struct meta_match *meta = NULL;
 
-	err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy);
+	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
+				   meta_policy, NULL);
 	if (err < 0)
 		goto errout;
 
@@ -793,8 +928,10 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
 		goto errout;
 
 	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-	if (meta == NULL)
+	if (meta == NULL) {
+		err = -ENOMEM;
 		goto errout;
+	}
 
 	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
 	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
@@ -819,7 +956,7 @@ errout:
 	return err;
 }
 
-static void em_meta_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+static void em_meta_destroy(struct tcf_ematch *m)
 {
 	if (m)
 		meta_delete((struct meta_match *) m->data);
@@ -829,7 +966,7 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
 {
 	struct meta_match *meta = (struct meta_match *) em->data;
 	struct tcf_meta_hdr hdr;
-	struct meta_type_ops *ops;
+	const struct meta_type_ops *ops;
 
 	memset(&hdr, 0, sizeof(hdr));
 	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
@@ -869,6 +1006,7 @@ static void __exit exit_em_meta(void)
 	tcf_em_unregister(&em_meta_ops);
 }
 
+MODULE_DESCRIPTION("ematch classifier for various internal kernel metadata, skb metadata and sk metadata");
 MODULE_LICENSE("GPL");
 
 module_init(init_em_meta);
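Editorial note, not part of the commit above: the bulk of the diff converts the socket collectors from the old SKIP_NONLOCAL() macro, which hid a return statement, to an explicit check, and from dereferencing skb->sk directly to resolving a full socket via skb_to_full_sk() first, since request-sock and time-wait minisockets do not carry fields such as sk_sndbuf or sk_priority. Fields that other contexts may update without the socket lock held are read with READ_ONCE(). As a minimal sketch of that pattern, here is what one additional collector could look like, assuming it sits inside em_meta.c (META_COLLECTOR(), dst and err come from that file) and using sk_mark purely as an illustrative field, not one the ematch is shown to export:

/*
 * Illustrative sketch only -- not part of the diff above. A hypothetical
 * collector following the converted pattern: resolve the full socket,
 * report "no usable value" through *err, and annotate the lockless read.
 */
META_COLLECTOR(int_sk_mark)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		/* request/time-wait minisockets lack a full struct sock */
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_mark);
}

From userspace these collectors are reached through the basic classifier's ematch syntax, along the lines of: tc filter add dev eth0 parent 1: basic match 'meta(sk_sndbuf gt 65535)' flowid 1:1. The exact identifier names are defined on the iproute2 side and are not confirmed by this diff.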
