Diffstat (limited to 'net/sched/cls_flower.c')
-rw-r--r--  net/sched/cls_flower.c | 647
1 file changed, 529 insertions(+), 118 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0b15698b3531..7669371c1354 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -11,6 +11,7 @@
 #include <linux/rhashtable.h>
 #include <linux/workqueue.h>
 #include <linux/refcount.h>
+#include <linux/bitfield.h>
 
 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -27,6 +28,7 @@
 #include <net/vxlan.h>
 #include <net/erspan.h>
 #include <net/gtp.h>
+#include <net/pfcp.h>
 #include <net/tc_wrapper.h>
 
 #include <net/dst.h>
@@ -39,6 +41,16 @@
 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
 	(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
 
+#define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
+	(TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
+	 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)
+
+#define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
+	(TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
+	 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
+	 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
+	 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
+
 struct fl_flow_key {
 	struct flow_dissector_key_meta meta;
 	struct flow_dissector_key_control control;
@@ -71,6 +83,8 @@ struct fl_flow_key {
 	struct flow_dissector_key_num_of_vlans num_of_vlans;
 	struct flow_dissector_key_pppoe pppoe;
 	struct flow_dissector_key_l2tpv3 l2tpv3;
+	struct flow_dissector_key_ipsec ipsec;
+	struct flow_dissector_key_cfm cfm;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
 
 struct fl_flow_mask_range {
@@ -120,6 +134,7 @@ struct cls_fl_filter {
 	u32 handle;
 	u32 flags;
 	u32 in_hw_count;
+	u8 needs_tc_skb_ext:1;
 	struct rcu_work rwork;
 	struct net_device *hw_dev;
 	/* Flower classifier is unlocked, which means that its reference counter
@@ -311,7 +326,7 @@ TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
 				  struct tcf_result *res)
 {
 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-	bool post_ct = tc_skb_cb(skb)->post_ct;
+	bool post_ct = qdisc_skb_cb(skb)->post_ct;
 	u16 zone = tc_skb_cb(skb)->zone;
 	struct fl_flow_key skb_key;
 	struct fl_flow_mask *mask;
@@ -415,6 +430,8 @@ static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
 
 static void __fl_destroy_filter(struct cls_fl_filter *f)
 {
+	if (f->needs_tc_skb_ext)
+		tc_skb_ext_tc_disable();
 	tcf_exts_destroy(&f->exts);
 	tcf_exts_put_net(&f->exts);
 	kfree(f);
@@ -502,12 +519,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 			 rtnl_held);
 
-	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
-				 cls_flower.stats.pkts,
-				 cls_flower.stats.drops,
-				 cls_flower.stats.lastused,
-				 cls_flower.stats.used_hw_stats,
-				 cls_flower.stats.used_hw_stats_valid);
+	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
 }
 
 static void __fl_put(struct cls_fl_filter *f)
@@ -534,6 +546,15 @@
 	return f;
 }
 
+static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
+{
+	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
+	struct cls_fl_filter *f;
+
+	f = idr_find(&head->handle_idr, handle);
+	return f ? &f->exts : NULL;
+}
+
 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 		       bool *last, bool rtnl_held,
 		       struct netlink_ext_ack *extack)
@@ -611,7 +632,8 @@ static void *fl_get(struct tcf_proto *tp, u32 handle)
 }
 
 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
-	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
+	[TCA_FLOWER_UNSPEC]		= { .strict_start_type =
+						TCA_FLOWER_L2_MISS },
 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
 					    .len = IFNAMSIZ },
@@ -657,8 +679,10 @@
 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
-	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
-	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_FLAGS]		= NLA_POLICY_MASK(NLA_BE32,
+							  TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
+	[TCA_FLOWER_KEY_FLAGS_MASK]	= NLA_POLICY_MASK(NLA_BE32,
+							  TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
@@ -716,7 +740,14 @@
 	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
-
+	[TCA_FLOWER_KEY_SPI]		= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_SPI_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_L2_MISS]		= NLA_POLICY_MAX(NLA_U8, 1),
+	[TCA_FLOWER_KEY_CFM]		= { .type = NLA_NESTED },
+	[TCA_FLOWER_KEY_ENC_FLAGS]	= NLA_POLICY_MASK(NLA_BE32,
+							  TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
+	[TCA_FLOWER_KEY_ENC_FLAGS_MASK]	= NLA_POLICY_MASK(NLA_BE32,
+							  TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
 };
 
 static const struct nla_policy
@@ -727,6 +758,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
 	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
+	[TCA_FLOWER_KEY_ENC_OPTS_PFCP]		= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy
@@ -734,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
-						       .len = 128 },
+						       .len = 127 },
 };
 
 static const struct nla_policy
@@ -757,6 +789,12 @@ gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
 };
 
 static const struct nla_policy
+pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
+	[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]	= { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]	= { .type = NLA_U64 },
+};
+
+static const struct nla_policy
 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
@@ -765,6 +803,13 @@
 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
 };
 
+static const struct nla_policy
+cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
+	[TCA_FLOWER_KEY_CFM_MD_LEVEL]	= NLA_POLICY_MAX(NLA_U8,
+							 FLOW_DIS_CFM_MDL_MAX),
+	[TCA_FLOWER_KEY_CFM_OPCODE]	= { .type = NLA_U8 },
+};
+
 static void fl_set_key_val(struct nlattr **tb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
@@ -778,6 +823,24 @@ static void fl_set_key_val(struct nlattr **tb,
 		nla_memcpy(mask, tb[mask_type], len);
 }
 
+static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
+			  struct fl_flow_key *mask,
+			  struct netlink_ext_ack *extack)
+{
+	if (key->basic.ip_proto != IPPROTO_ESP &&
+	    key->basic.ip_proto != IPPROTO_AH) {
+		NL_SET_ERR_MSG(extack,
+			       "Protocol must be either ESP or AH");
+		return -EINVAL;
+	}
+
+	fl_set_key_val(tb, &key->ipsec.spi,
+		       TCA_FLOWER_KEY_SPI,
+		       &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
+		       sizeof(key->ipsec.spi));
+	return 0;
+}
+
 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 				 struct fl_flow_key *mask,
 				 struct netlink_ext_ack *extack)
@@ -795,6 +858,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 		       TCA_FLOWER_KEY_PORT_SRC_MAX,
 		       &mask->tp_range.tp_max.src,
 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
 
+	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
+		NL_SET_ERR_MSG(extack,
+			       "Both min and max destination ports must be specified");
+		return -EINVAL;
+	}
+	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
+		NL_SET_ERR_MSG(extack,
+			       "Both min and max source ports must be specified");
+		return -EINVAL;
+	}
 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
 	    ntohs(key->tp_range.tp_max.dst) <=
 	    ntohs(key->tp_range.tp_min.dst)) {
@@ -1053,7 +1126,7 @@ static void fl_set_key_pppoe(struct nlattr **tb,
 	 * because ETH_P_PPP_SES was stored in basic.n_proto
 	 * which might get overwritten by ppp_proto
 	 * or might be set to 0, the role of key_val::type
-	 * is simmilar to vlan_key::tpid
+	 * is similar to vlan_key::tpid
 	 */
 	key_val->type = htons(ETH_P_PPP_SES);
 	key_mask->type = cpu_to_be16(~0);
@@ -1098,19 +1171,29 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
 	}
 }
 
-static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
-			    u32 *flags_mask, struct netlink_ext_ack *extack)
+static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
+			    bool encap, u32 *flags_key, u32 *flags_mask,
+			    struct netlink_ext_ack *extack)
 {
+	int fl_key, fl_mask;
 	u32 key, mask;
 
+	if (encap) {
+		fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+		fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+	} else {
+		fl_key = TCA_FLOWER_KEY_FLAGS;
+		fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+	}
+
 	/* mask is mandatory for flags */
-	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
+	if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
 		NL_SET_ERR_MSG(extack, "Missing flags mask");
 		return -EINVAL;
 	}
 
-	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
-	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
+	key = be32_to_cpu(nla_get_be32(tb[fl_key]));
+	mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));
 
 	*flags_key  = 0;
 	*flags_mask = 0;
@@ -1121,6 +1204,21 @@ static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
 			FLOW_DIS_FIRST_FRAG);
 
+	fl_set_key_flag(key, mask, flags_key, flags_mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+			FLOW_DIS_F_TUNNEL_CSUM);
+
+	fl_set_key_flag(key, mask, flags_key, flags_mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+			FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+	fl_set_key_flag(key, mask, flags_key, flags_mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+	fl_set_key_flag(key, mask, flags_key, flags_mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+			FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
 	return 0;
 }
 
@@ -1149,6 +1247,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
 	if (option_len > sizeof(struct geneve_opt))
 		data_len = option_len - sizeof(struct geneve_opt);
 
+	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+		return -ERANGE;
+
 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
 	memset(opt, 0xff, option_len);
 	opt->length = data_len / 4;
@@ -1268,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
 	int err;
 
 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
-	memset(md, 0xff, sizeof(*md));
 	md->version = 1;
 
 	if (!depth)
@@ -1297,9 +1397,9 @@
 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
 			return -EINVAL;
 		}
+		memset(&md->u.index, 0xff, sizeof(md->u.index));
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
-			memset(&md->u, 0x00, sizeof(md->u));
 			md->u.index = nla_get_be32(nla);
 		}
 	} else if (md->version == 2) {
@@ -1308,10 +1408,12 @@
 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
 			return -EINVAL;
 		}
+		md->u.md2.dir = 1;
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
 			md->u.md2.dir = nla_get_u8(nla);
 		}
+		set_hwid(&md->u.md2, 0xff);
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
 			set_hwid(&md->u.md2, nla_get_u8(nla));
@@ -1367,6 +1469,44 @@
 	return sizeof(*sinfo);
 }
 
+static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
+			   int depth, int option_len,
+			   struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
+	struct pfcp_metadata *md;
+	int err;
+
+	md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
+	memset(md, 0xff, sizeof(*md));
+
+	if (!depth)
+		return sizeof(*md);
+
+	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
+		NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
+			       pfcp_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
+		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
+		md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);
+
+	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
+		md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);
+
+	return sizeof(*md);
+}
+
 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 			  struct fl_flow_key *mask,
 			  struct netlink_ext_ack *extack)
@@ -1402,12 +1542,13 @@
 		switch (nla_type(nla_opt_key)) {
 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
 			if (key->enc_opts.dst_opt_type &&
-			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
+			    key->enc_opts.dst_opt_type !=
+			    IP_TUNNEL_GENEVE_OPT_BIT) {
 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
 				return -EINVAL;
 			}
 			option_len = 0;
-			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+			key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
 			option_len = fl_set_geneve_opt(nla_opt_key, key,
 						       key_depth, option_len,
 						       extack);
@@ -1418,7 +1559,7 @@
 			/* At the same time we need to parse through the mask
 			 * in order to verify exact and mask attribute lengths.
 			 */
-			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
+			mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
 						       msk_depth, option_len,
 						       extack);
@@ -1437,7 +1578,7 @@
 				return -EINVAL;
 			}
 			option_len = 0;
-			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+			key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
 						      key_depth, option_len,
 						      extack);
@@ -1448,7 +1589,7 @@
 			/* At the same time we need to parse through the mask
 			 * in order to verify exact and mask attribute lengths.
 			 */
-			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
+			mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
 						      msk_depth, option_len,
 						      extack);
@@ -1467,7 +1608,7 @@
 				return -EINVAL;
 			}
 			option_len = 0;
-			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+			key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
 			option_len = fl_set_erspan_opt(nla_opt_key, key,
 						       key_depth, option_len,
 						       extack);
@@ -1478,7 +1619,7 @@
 			/* At the same time we need to parse through the mask
 			 * in order to verify exact and mask attribute lengths.
 			 */
-			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
+			mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
 						       msk_depth, option_len,
 						       extack);
@@ -1498,7 +1639,7 @@
 				return -EINVAL;
 			}
 			option_len = 0;
-			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+			key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
 			option_len = fl_set_gtp_opt(nla_opt_key, key,
 						    key_depth, option_len,
 						    extack);
@@ -1509,7 +1650,7 @@
 			/* At the same time we need to parse through the mask
 			 * in order to verify exact and mask attribute lengths.
 			 */
-			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
+			mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
 			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
 						    msk_depth, option_len,
 						    extack);
@@ -1523,6 +1664,36 @@
 				return -EINVAL;
 			}
 			break;
+		case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
+			if (key->enc_opts.dst_opt_type) {
+				NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
+				return -EINVAL;
+			}
+			option_len = 0;
+			key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
+			option_len = fl_set_pfcp_opt(nla_opt_key, key,
+						     key_depth, option_len,
+						     extack);
+			if (option_len < 0)
+				return option_len;
+
+			key->enc_opts.len += option_len;
+			/* At the same time we need to parse through the mask
+			 * in order to verify exact and mask attribute lengths.
+			 */
+			mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
+			option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
+						     msk_depth, option_len,
+						     extack);
+			if (option_len < 0)
+				return option_len;
+
+			mask->enc_opts.len += option_len;
+			if (key->enc_opts.len != mask->enc_opts.len) {
+				NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned");
+				return -EINVAL;
+			}
+			break;
 		default:
 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
 			return -EINVAL;
@@ -1649,9 +1820,56 @@ static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
 	return false;
 }
 
-static int fl_set_key(struct net *net, struct nlattr **tb,
-		      struct fl_flow_key *key, struct fl_flow_key *mask,
-		      struct netlink_ext_ack *extack)
+static void fl_set_key_cfm_md_level(struct nlattr **tb,
+				    struct fl_flow_key *key,
+				    struct fl_flow_key *mask,
+				    struct netlink_ext_ack *extack)
+{
+	u8 level;
+
+	if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
+		return;
+
+	level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
+	key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
+	mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
+}
+
+static void fl_set_key_cfm_opcode(struct nlattr **tb,
+				  struct fl_flow_key *key,
+				  struct fl_flow_key *mask,
+				  struct netlink_ext_ack *extack)
+{
+	fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
+		       &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
+		       sizeof(key->cfm.opcode));
+}
+
+static int fl_set_key_cfm(struct nlattr **tb,
+			  struct fl_flow_key *key,
+			  struct fl_flow_key *mask,
+			  struct netlink_ext_ack *extack)
+{
+	struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
+	int err;
+
+	if (!tb[TCA_FLOWER_KEY_CFM])
+		return 0;
+
+	err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
+			       tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
+	if (err < 0)
+		return err;
+
+	fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
+	fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
+
+	return 0;
+}
+
+static int fl_set_key(struct net *net, struct nlattr *tca_opts,
+		      struct nlattr **tb, struct fl_flow_key *key,
+		      struct fl_flow_key *mask, struct netlink_ext_ack *extack)
 {
 	__be16 ethertype;
 	int ret = 0;
@@ -1664,6 +1882,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 		mask->meta.ingress_ifindex = 0xffffffff;
 	}
 
+	fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
+		       &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
+		       sizeof(key->meta.l2_miss));
+
 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
 		       sizeof(key->eth.dst));
@@ -1799,6 +2021,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 			       TCA_FLOWER_KEY_L2TPV3_SID,
 			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
 			       sizeof(key->l2tpv3.session_id));
+	} else if (key->basic.n_proto == htons(ETH_P_CFM)) {
+		ret = fl_set_key_cfm(tb, key, mask, extack);
+		if (ret)
+			return ret;
 	}
 
 	if (key->basic.ip_proto == IPPROTO_TCP ||
@@ -1809,6 +2035,12 @@
 			return ret;
 	}
 
+	if (tb[TCA_FLOWER_KEY_SPI]) {
+		ret = fl_set_key_spi(tb, key, mask, extack);
+		if (ret)
+			return ret;
+	}
+
 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -1869,9 +2101,18 @@
 	if (ret)
 		return ret;
 
-	if (tb[TCA_FLOWER_KEY_FLAGS])
-		ret = fl_set_key_flags(tb, &key->control.flags,
+	if (tb[TCA_FLOWER_KEY_FLAGS]) {
+		ret = fl_set_key_flags(tca_opts, tb, false,
+				       &key->control.flags,
 				       &mask->control.flags, extack);
+		if (ret)
+			return ret;
+	}
+
+	if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
+		ret = fl_set_key_flags(tca_opts, tb, true,
+				       &key->enc_control.flags,
+				       &mask->enc_control.flags, extack);
 
 	return ret;
 }
@@ -1962,7 +2203,8 @@
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
-	    FL_KEY_IS_MASKED(mask, enc_ipv6))
+	    FL_KEY_IS_MASKED(mask, enc_ipv6) ||
+	    FL_KEY_IS_MASKED(mask, enc_control))
 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
 			   enc_control);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1981,6 +2223,10 @@
 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_IPSEC, ipsec);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_CFM, cfm);
 
 	skb_flow_dissector_init(dissector, keys, cnt);
 }
@@ -2081,43 +2327,9 @@
 errout_cleanup:
 	return ret;
 }
 
-static int fl_set_parms(struct net *net, struct tcf_proto *tp,
-			struct cls_fl_filter *f, struct fl_flow_mask *mask,
-			unsigned long base, struct nlattr **tb,
-			struct nlattr *est,
-			struct fl_flow_tmplt *tmplt,
-			u32 flags, u32 fl_flags,
-			struct netlink_ext_ack *extack)
+static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
 {
-	int err;
-
-	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
-				   fl_flags, extack);
-	if (err < 0)
-		return err;
-
-	if (tb[TCA_FLOWER_CLASSID]) {
-		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
-		if (flags & TCA_ACT_FLAGS_NO_RTNL)
-			rtnl_lock();
-		tcf_bind_filter(tp, &f->res, base);
-		if (flags & TCA_ACT_FLAGS_NO_RTNL)
-			rtnl_unlock();
-	}
-
-	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
-	if (err)
-		return err;
-
-	fl_mask_update_range(mask);
-	fl_set_masked_key(&f->mkey, &f->key, mask);
-
-	if (!fl_mask_fits_tmplt(tmplt, mask)) {
-		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
-		return -EINVAL;
-	}
-
-	return 0;
+	return mask->meta.l2_miss;
 }
 
 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
@@ -2150,14 +2362,16 @@
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
+	struct nlattr *tca_opts = tca[TCA_OPTIONS];
 	struct cls_fl_filter *fold = *arg;
+	bool bound_to_filter = false;
 	struct cls_fl_filter *fnew;
 	struct fl_flow_mask *mask;
 	struct nlattr **tb;
 	bool in_ht;
 	int err;
 
-	if (!tca[TCA_OPTIONS]) {
+	if (!tca_opts) {
 		err = -EINVAL;
 		goto errout_fold;
 	}
@@ -2175,7 +2389,7 @@
 	}
 
 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
-					  tca[TCA_OPTIONS], fl_policy, NULL);
+					  tca_opts, fl_policy, NULL);
 	if (err < 0)
 		goto errout_tb;
 
@@ -2192,28 +2406,89 @@
 	INIT_LIST_HEAD(&fnew->hw_list);
 	refcount_set(&fnew->refcnt, 1);
 
-	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-	if (err < 0)
-		goto errout;
-
 	if (tb[TCA_FLOWER_FLAGS]) {
 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 		if (!tc_flags_valid(fnew->flags)) {
+			kfree(fnew);
 			err = -EINVAL;
-			goto errout;
+			goto errout_tb;
 		}
 	}
 
-	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
-			   tp->chain->tmplt_priv, flags, fnew->flags,
-			   extack);
+	if (!fold) {
+		spin_lock(&tp->lock);
+		if (!handle) {
+			handle = 1;
+			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
+					    INT_MAX, GFP_ATOMIC);
+		} else {
+			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
+					    handle, GFP_ATOMIC);
+
+			/* Filter with specified handle was concurrently
+			 * inserted after initial check in cls_api. This is not
+			 * necessarily an error if NLM_F_EXCL is not set in
+			 * message flags. Returning EAGAIN will cause cls_api to
+			 * try to update concurrently inserted rule.
+			 */
+			if (err == -ENOSPC)
+				err = -EAGAIN;
+		}
+		spin_unlock(&tp->lock);
+
+		if (err) {
+			kfree(fnew);
+			goto errout_tb;
+		}
+	}
+	fnew->handle = handle;
+
+	err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
+			       !tc_skip_hw(fnew->flags));
+	if (err < 0)
+		goto errout_idr;
+
+	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
+				   &fnew->exts, flags, fnew->flags,
+				   extack);
+	if (err < 0)
+		goto errout_idr;
+
+	if (tb[TCA_FLOWER_CLASSID]) {
+		fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_lock();
+		tcf_bind_filter(tp, &fnew->res, base);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_unlock();
+		bound_to_filter = true;
+	}
+
+	err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
 	if (err)
-		goto errout;
+		goto unbind_filter;
+
+	fl_mask_update_range(mask);
+	fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
+
+	if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
+		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
+		err = -EINVAL;
+		goto unbind_filter;
+	}
+
+	/* Enable tc skb extension if filter matches on data extracted from
+	 * this extension.
+	 */
+	if (fl_needs_tc_skb_ext(&mask->key)) {
+		fnew->needs_tc_skb_ext = 1;
+		tc_skb_ext_tc_enable();
+	}
 
 	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
-		goto errout;
+		goto unbind_filter;
 
 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
 	if (err)
@@ -2228,6 +2503,8 @@
 	if (!tc_in_hw(fnew->flags))
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
+	tcf_proto_update_usesw(tp, fnew->flags);
+
 	spin_lock(&tp->lock);
 
 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
@@ -2279,29 +2556,9 @@
 		refcount_dec(&fold->refcnt);
 		__fl_put(fold);
 	} else {
-		if (handle) {
-			/* user specifies a handle and it doesn't exist */
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    handle, GFP_ATOMIC);
-
-			/* Filter with specified handle was concurrently
-			 * inserted after initial check in cls_api. This is not
-			 * necessarily an error if NLM_F_EXCL is not set in
-			 * message flags. Returning EAGAIN will cause cls_api to
-			 * try to update concurrently inserted rule.
-			 */
-			if (err == -ENOSPC)
-				err = -EAGAIN;
-		} else {
-			handle = 1;
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    INT_MAX, GFP_ATOMIC);
-		}
-		if (err)
-			goto errout_hw;
-
+		idr_replace(&head->handle_idr, fnew, fnew->handle);
 		refcount_inc(&fnew->refcnt);
-		fnew->handle = handle;
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
 		spin_unlock(&tp->lock);
 	}
@@ -2324,7 +2581,22 @@ errout_hw:
 				       fnew->mask->filter_ht_params);
 errout_mask:
 	fl_mask_put(head, fnew->mask);
-errout:
+
+unbind_filter:
+	if (bound_to_filter) {
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_lock();
+		tcf_unbind_filter(tp, &fnew->res);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_unlock();
+	}
+
+errout_idr:
+	if (!fold) {
+		spin_lock(&tp->lock);
+		idr_remove(&head->handle_idr, fnew->handle);
+		spin_unlock(&tp->lock);
+	}
 	__fl_put(fnew);
 errout_tb:
 	kfree(tb);
@@ -2363,7 +2635,7 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 	rcu_read_lock();
 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
 		/* don't return filters that are being deleted */
-		if (!refcount_inc_not_zero(&f->refcnt))
+		if (!f || !refcount_inc_not_zero(&f->refcnt))
 			continue;
 		rcu_read_unlock();
 
@@ -2535,18 +2807,19 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
 					   struct nlattr **tca,
 					   struct netlink_ext_ack *extack)
 {
+	struct nlattr *tca_opts = tca[TCA_OPTIONS];
 	struct fl_flow_tmplt *tmplt;
 	struct nlattr **tb;
 	int err;
 
-	if (!tca[TCA_OPTIONS])
+	if (!tca_opts)
 		return ERR_PTR(-EINVAL);
 
 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
 	if (!tb)
 		return ERR_PTR(-ENOBUFS);
 
 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
-					  tca[TCA_OPTIONS], fl_policy, NULL);
+					  tca_opts, fl_policy, NULL);
 	if (err)
 		goto errout_tb;
 
@@ -2556,7 +2829,8 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
 		goto errout_tb;
 	}
 	tmplt->chain = chain;
-	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
+	err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
+			 &tmplt->mask, extack);
 	if (err)
 		goto errout_tmplt;
 
@@ -2584,6 +2858,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
 	kfree(tmplt);
 }
 
+static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
+			       flow_setup_cb_t *cb, void *cb_priv)
+{
+	struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
+	struct flow_cls_offload cls_flower = {};
+
+	cls_flower.rule = flow_rule_alloc(0);
+	if (!cls_flower.rule)
+		return;
+
+	cls_flower.common.chain_index = chain->index;
+	cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
+				   FLOW_CLS_TMPLT_DESTROY;
+	cls_flower.cookie = (unsigned long) tmplt;
+	cls_flower.rule->match.dissector = &tmplt->dissector;
+	cls_flower.rule->match.mask = &tmplt->mask;
+	cls_flower.rule->match.key = &tmplt->dummy_key;
+
+	cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+	kfree(cls_flower.rule);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
@@ -2810,12 +3106,22 @@ static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
 	}
 }
 
-static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
+static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
+			     u32 flags_key, u32 flags_mask)
 {
-	u32 key, mask;
+	int fl_key, fl_mask;
 	__be32 _key, _mask;
+	u32 key, mask;
 	int err;
 
+	if (encap) {
+		fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
+		fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
+	} else {
+		fl_key = TCA_FLOWER_KEY_FLAGS;
+		fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
+	}
+
 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
 		return 0;
 
@@ -2828,14 +3134,29 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
 			FLOW_DIS_FIRST_FRAG);
 
+	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+			FLOW_DIS_F_TUNNEL_CSUM);
+
+	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+			FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
+
+	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
+
+	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
+			TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+			FLOW_DIS_F_TUNNEL_CRIT_OPT);
+
 	_key = cpu_to_be32(key);
 	_mask = cpu_to_be32(mask);
 
-	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
+	err = nla_put(skb, fl_key, 4, &_key);
 	if (err)
 		return err;
 
-	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
+	return nla_put(skb, fl_mask, 4, &_mask);
 }
 
 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
@@ -2955,6 +3276,32 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
+				struct flow_dissector_key_enc_opts *enc_opts)
+{
+	struct pfcp_metadata *md;
+	struct nlattr *nest;
+
+	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
+	if (!nest)
+		goto nla_put_failure;
+
+	md = (struct pfcp_metadata *)&enc_opts->data[0];
+	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
+		goto nla_put_failure;
+
+	if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
+			 md->seid, 0))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
 static int fl_dump_key_ct(struct sk_buff *skb,
 			  struct flow_dissector_key_ct *key,
 			  struct flow_dissector_key_ct *mask)
@@ -2989,6 +3336,43 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int fl_dump_key_cfm(struct sk_buff *skb,
+			   struct flow_dissector_key_cfm *key,
+			   struct flow_dissector_key_cfm *mask)
+{
+	struct nlattr *opts;
+	int err;
+	u8 mdl;
+
+	if (!memchr_inv(mask, 0, sizeof(*mask)))
+		return 0;
+
+	opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
+	if (!opts)
+		return -EMSGSIZE;
+
+	if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
+		mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
+		if (err)
+			goto err_cfm_opts;
+	}
+
+	if (mask->opcode) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
+		if (err)
+			goto err_cfm_opts;
+	}
+
+	nla_nest_end(skb, opts);
+
+	return 0;
+
+err_cfm_opts:
+	nla_nest_cancel(skb, opts);
+	return err;
+}
+
 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
 			       struct flow_dissector_key_enc_opts *enc_opts)
 {
@@ -3003,26 +3387,31 @@
 		goto nla_put_failure;
 
 	switch (enc_opts->dst_opt_type) {
-	case TUNNEL_GENEVE_OPT:
+	case IP_TUNNEL_GENEVE_OPT_BIT:
 		err = fl_dump_key_geneve_opt(skb, enc_opts);
 		if (err)
			goto nla_put_failure;
 		break;
-	case TUNNEL_VXLAN_OPT:
+	case IP_TUNNEL_VXLAN_OPT_BIT:
 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
 		if (err)
 			goto nla_put_failure;
 		break;
-	case TUNNEL_ERSPAN_OPT:
+	case IP_TUNNEL_ERSPAN_OPT_BIT:
 		err = fl_dump_key_erspan_opt(skb, enc_opts);
 		if (err)
 			goto nla_put_failure;
 		break;
-	case TUNNEL_GTP_OPT:
+	case IP_TUNNEL_GTP_OPT_BIT:
 		err = fl_dump_key_gtp_opt(skb, enc_opts);
 		if (err)
 			goto nla_put_failure;
 		break;
+	case IP_TUNNEL_PFCP_OPT_BIT:
+		err = fl_dump_key_pfcp_opt(skb, enc_opts);
+		if (err)
+			goto nla_put_failure;
+		break;
 	default:
 		goto nla_put_failure;
 	}
@@ -3058,6 +3447,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 			goto nla_put_failure;
 	}
 
+	if (fl_dump_key_val(skb, &key->meta.l2_miss,
+			    TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
			    TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
+		goto nla_put_failure;
+
 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
 			    sizeof(key->eth.dst)) ||
@@ -3215,6 +3609,12 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 			    sizeof(key->l2tpv3.session_id)))
 		goto nla_put_failure;
 
+	if (key->ipsec.spi &&
+	    fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI,
+			    &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
+			    sizeof(key->ipsec.spi)))
+		goto nla_put_failure;
+
 	if ((key->basic.ip_proto == IPPROTO_TCP ||
 	     key->basic.ip_proto == IPPROTO_UDP ||
 	     key->basic.ip_proto == IPPROTO_SCTP) &&
@@ -3263,7 +3663,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
 		goto nla_put_failure;
 
-	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
+	if (fl_dump_key_flags(skb, false, key->control.flags,
+			      mask->control.flags))
 		goto nla_put_failure;
 
 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
@@ -3271,6 +3672,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 			    sizeof(key->hash.hash)))
 		goto nla_put_failure;
 
+	if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
+		goto nla_put_failure;
+
+	if (fl_dump_key_flags(skb, true, key->enc_control.flags,
+			      mask->enc_control.flags))
+		goto nla_put_failure;
+
 	return 0;
 
 nla_put_failure:
@@ -3440,10 +3848,13 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
 	.bind_class	= fl_bind_class,
 	.tmplt_create	= fl_tmplt_create,
 	.tmplt_destroy	= fl_tmplt_destroy,
+	.tmplt_reoffload = fl_tmplt_reoffload,
 	.tmplt_dump	= fl_tmplt_dump,
+	.get_exts	= fl_get_exts,
 	.owner		= THIS_MODULE,
 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
 };
+MODULE_ALIAS_NET_CLS("flower");
 
 static int __init cls_fl_init(void)
 {
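The CFM matching added above packs the 3-bit maintenance domain level into the upper bits of cfm.mdl_ver via FIELD_PREP()/FIELD_GET(), which is why the patch pulls in <linux/bitfield.h>. Below is a minimal standalone C sketch of that packing, assuming FLOW_DIS_CFM_MDL_MASK covers bits 7..5 (GENMASK(7, 5)) as in the flow dissector headers; the helper names here are invented for illustration and are not kernel API.

#include <stdint.h>
#include <stdio.h>

#define CFM_MDL_MASK  0xE0u	/* assumed stand-in for FLOW_DIS_CFM_MDL_MASK, GENMASK(7, 5) */
#define CFM_MDL_SHIFT 5u	/* position of the lowest set bit of the mask */

/* Hand-rolled FIELD_PREP(): shift the level into the masked field. */
static uint8_t cfm_mdl_prep(uint8_t level)
{
	return (uint8_t)((level << CFM_MDL_SHIFT) & CFM_MDL_MASK);
}

/* Hand-rolled FIELD_GET(): extract the level back out of mdl_ver. */
static uint8_t cfm_mdl_get(uint8_t mdl_ver)
{
	return (uint8_t)((mdl_ver & CFM_MDL_MASK) >> CFM_MDL_SHIFT);
}

int main(void)
{
	uint8_t mdl_ver = cfm_mdl_prep(5);	/* mirrors fl_set_key_cfm_md_level() */

	/* prints: mdl_ver=0xa0 level=5 */
	printf("mdl_ver=0x%02x level=%u\n", mdl_ver, cfm_mdl_get(mdl_ver));
	return 0;
}

The mask side of the filter simply stores the full field mask (mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK), so fl_dump_key_cfm() can test whether the level was matched at all before unpacking it.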
