Diffstat (limited to 'net/sched/cls_flower.c')
-rw-r--r--   net/sched/cls_flower.c   105
1 files changed, 47 insertions, 58 deletions
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d230cb4c8094..543a3e875d05 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -87,7 +87,10 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct net_device *hw_dev;
};
@@ -152,37 +155,12 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_fl_filter *f;
struct fl_flow_key skb_key;
struct fl_flow_key skb_mkey;
- struct ip_tunnel_info *info;
if (!atomic_read(&head->ht.nelems))
return -1;
fl_clear_masked_range(&skb_key, &head->mask);
- info = skb_tunnel_info(skb);
- if (info) {
- struct ip_tunnel_key *key = &info->key;
-
- switch (ip_tunnel_info_af(info)) {
- case AF_INET:
- skb_key.enc_control.addr_type =
- FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- skb_key.enc_ipv4.src = key->u.ipv4.src;
- skb_key.enc_ipv4.dst = key->u.ipv4.dst;
- break;
- case AF_INET6:
- skb_key.enc_control.addr_type =
- FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- skb_key.enc_ipv6.src = key->u.ipv6.src;
- skb_key.enc_ipv6.dst = key->u.ipv6.dst;
- break;
- }
-
- skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
- skb_key.enc_tp.src = key->tp_src;
- skb_key.enc_tp.dst = key->tp_dst;
- }
-
skb_key.indev_ifindex = skb->skb_iif;
/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
* so do it rather here.
@@ -215,27 +193,41 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
+static void __fl_destroy_filter(struct cls_fl_filter *f)
+{
+ tcf_exts_destroy(&f->exts);
+ tcf_exts_put_net(&f->exts);
+ kfree(f);
+}
+
+static void fl_destroy_filter_work(struct work_struct *work)
+{
+ struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
+
+ rtnl_lock();
+ __fl_destroy_filter(f);
+ rtnl_unlock();
+}
+
static void fl_destroy_filter(struct rcu_head *head)
{
struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
- tcf_exts_destroy(&f->exts);
- kfree(f);
+ INIT_WORK(&f->work, fl_destroy_filter_work);
+ tcf_queue_work(&f->work);
}
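For context, here is a minimal, self-contained sketch of the deferred-destruction pattern that the work/rcu union above enables (hypothetical struct foo and foo_* helpers, and plain schedule_work() rather than tc's private tcf_queue_work() queue). The rcu_head is only live until the RCU callback fires; at that point the same storage is reinitialised as a work_struct so the actual free can run in process context, where it may take rtnl_lock(), a sleeping lock that an RCU softirq callback must not acquire.

#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct foo {
	union {				/* never needed at the same time     */
		struct work_struct work;	/* used after the grace period */
		struct rcu_head rcu;		/* used while waiting for it   */
	};
};

static void foo_free_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	rtnl_lock();	/* cleanup that needs RTNL (e.g. action teardown) goes here */
	kfree(f);
	rtnl_unlock();
}

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	/* grace period is over: the rcu_head is dead, reuse its bytes as work */
	INIT_WORK(&f->work, foo_free_work);
	schedule_work(&f->work);
}

/* callers start teardown with: call_rcu(&f->rcu, foo_free_rcu); */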
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
struct tc_cls_flower_offload cls_flower = {};
- struct net_device *dev = f->hw_dev;
-
- if (!tc_can_offload(dev))
- return;
+ struct tcf_block *block = tp->chain->block;
tc_cls_common_offload_init(&cls_flower.common, tp);
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
+ tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
+ &cls_flower, false);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -243,22 +235,11 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
struct fl_flow_key *mask,
struct cls_fl_filter *f)
{
- struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_flower_offload cls_flower = {};
+ struct tcf_block *block = tp->chain->block;
+ bool skip_sw = tc_skip_sw(f->flags);
int err;
- if (!tc_can_offload(dev)) {
- if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
- (f->hw_dev && !tc_can_offload(f->hw_dev))) {
- f->hw_dev = dev;
- return tc_skip_sw(f->flags) ? -EINVAL : 0;
- }
- dev = f->hw_dev;
- cls_flower.egress_dev = true;
- } else {
- f->hw_dev = dev;
- }
-
tc_cls_common_offload_init(&cls_flower.common, tp);
cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f;
@@ -266,32 +247,36 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.mask = mask;
cls_flower.key = &f->mkey;
cls_flower.exts = &f->exts;
+ cls_flower.classid = f->res.classid;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- &cls_flower);
- if (!err)
+ err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
+ &cls_flower, skip_sw);
+ if (err < 0) {
+ fl_hw_destroy_filter(tp, f);
+ return err;
+ } else if (err > 0) {
f->flags |= TCA_CLS_FLAGS_IN_HW;
+ }
+
+ if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
+ return -EINVAL;
- if (tc_skip_sw(f->flags))
- return err;
return 0;
}
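The error handling above relies on tc_setup_cb_call()'s return-value convention as used here: a negative value is a hard failure (only propagated when the err_stop/skip_sw argument is set), zero means no hardware callback accepted the rule, and a positive value is the number of callbacks that did. A hedged caller-side sketch, with a hypothetical helper name, that mirrors the logic of fl_hw_replace_filter above:

#include <net/pkt_cls.h>

/* hypothetical helper restating the flow above; not a kernel API */
static int foo_offload_rule(struct tcf_block *block, struct tcf_exts *exts,
			    struct tc_cls_flower_offload *cls_flower,
			    u32 *flags, bool skip_sw)
{
	int err;

	err = tc_setup_cb_call(block, exts, TC_SETUP_CLSFLOWER,
			       cls_flower, skip_sw);
	if (err < 0)
		return err;			/* a callback failed, skip_sw set */
	if (err > 0)
		*flags |= TCA_CLS_FLAGS_IN_HW;	/* err callbacks took the rule   */

	if (skip_sw && !(*flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;			/* hw-only rule, nothing in hw   */
	return 0;
}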
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
struct tc_cls_flower_offload cls_flower = {};
- struct net_device *dev = f->hw_dev;
-
- if (!tc_can_offload(dev))
- return;
+ struct tcf_block *block = tp->chain->block;
tc_cls_common_offload_init(&cls_flower.common, tp);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
cls_flower.exts = &f->exts;
+ cls_flower.classid = f->res.classid;
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- &cls_flower);
+ tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
+ &cls_flower, false);
}
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -303,7 +288,10 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f);
tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, fl_destroy_filter);
+ if (tcf_exts_get_net(&f->exts))
+ call_rcu(&f->rcu, fl_destroy_filter);
+ else
+ __fl_destroy_filter(f);
}
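The tcf_exts_get_net()/tcf_exts_put_net() calls added by this patch are two halves of one reference on the actions' network namespace, but they land in separate hunks; a compact sketch of the lifecycle follows (hypothetical foo_* names, and the rationale is my reading rather than stated in the diff: if the namespace reference can no longer be taken, presumably because the netns is already being torn down, the filter must be freed immediately instead of after a grace period).

/* delete path (__fl_delete above) */
static void foo_delete(struct cls_fl_filter *f)
{
	if (tcf_exts_get_net(&f->exts))			/* pin the actions' netns */
		call_rcu(&f->rcu, fl_destroy_filter);	/* deferred free          */
	else
		__fl_destroy_filter(f);			/* no ref available: free now */
}

/* free path, reached via RCU + workqueue (__fl_destroy_filter above) */
static void foo_free(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);	/* drop the netns ref, if one was taken */
	kfree(f);
}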
static void fl_destroy_sleepable(struct work_struct *work)
@@ -973,6 +961,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
+ tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, fl_destroy_filter);
} else {
list_add_tail_rcu(&fnew->list, &head->filters);