author     Xin Long <lucien.xin@gmail.com>        2025-01-15 09:27:54 -0500
committer  David S. Miller <davem@davemloft.net>  2025-01-20 09:21:27 +0000
commit     a12c76a03386e32413ae8eaaefa337e491880632 (patch)
tree       2d251a5f78836e97a80c6fbb714e844f0d96c7a5 /net/sched
parent     59372af69d4d71e6487614f1b35712cf241eadb4 (diff)
net: sched: refine software bypass handling in tc_run
This patch addresses issues with filter counting in block (tcf_block),
particularly for software bypass scenarios, by introducing a more
accurate mechanism using useswcnt.

Previously, filtercnt and skipswcnt were introduced by:

  commit 2081fd3445fe ("net: sched: cls_api: add filter counter")
  commit f631ef39d819 ("net: sched: cls_api: add skip_sw counter")

filtercnt tracked all tp (tcf_proto) objects added to a block, and
skipswcnt counted tp objects with the skipsw attribute set.

The problem is that a single tp can contain multiple filters, some with
skipsw and others without, and the current implementation fails in that
case: when the first filter in a tp has skipsw, both skipswcnt and
filtercnt are incremented; adding a second filter without skipsw to the
same tp then does not modify these counters, because tp->counted is
already set. Since the software bypass decision is based solely on
skipswcnt equaling filtercnt, the block bypasses software even though it
includes filters without skipsw, and those filters are inadvertently
skipped.

To address this, the patch introduces useswcnt in the block to
explicitly count tp objects containing at least one filter without
skipsw. Key changes:

  - Whenever a filter without skipsw is added, its tp is marked with
    usesw and counted in useswcnt.
  - tc_run() now uses useswcnt to determine software bypass,
    eliminating reliance on filtercnt and skipswcnt.

This refined approach prevents software bypass for blocks containing
mixed filters, ensuring correct behavior in tc_run().

Additionally, since atomic operations on useswcnt ensure thread safety
and tp->lock guards access to tp->usesw and tp->counted, the broader
down_write(&block->cb_lock) is no longer required in tc_new_tfilter().
This resolves a performance regression caused by the filter counting
mechanism during parallel filter insertions. The improvement can be
demonstrated using the following script:

  # cat insert_tc_rules.sh
  tc qdisc add dev ens1f0np0 ingress
  for i in $(seq 16); do
      taskset -c $i tc -b rules_$i.txt &
  done
  wait

Each of the rules_$i.txt files above contains 100000 tc filter rules,
inserted on the mlx5 NIC ens1f0np0.

Without this patch:

  # time sh insert_tc_rules.sh
  real    0m50.780s
  user    0m23.556s
  sys     4m13.032s

With this patch:

  # time sh insert_tc_rules.sh
  real    0m17.718s
  user    0m7.807s
  sys     3m45.050s

Fixes: 047f340b36fc ("net: sched: make skip_sw actually skip software")
Reported-by: Shuang Li <shuali@redhat.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Reviewed-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
Tested-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
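The tc_run() side of this change lives in net/core/dev.c and is thus
outside this diffstat (limited to net/sched). As a rough sketch of the
decision it now makes: tcf_sw_enabled_key and block->useswcnt come from
this patch, while the helper name and exact shape below are
illustrative assumptions, not the committed code.

  /* Sketch only: the software-bypass decision consulted from the
   * tc_run() fast path. tc_bypass_software() is a hypothetical name
   * used for illustration.
   */
  static bool tc_bypass_software(struct tcf_block *block)
  {
          /* Global gate: the static key is enabled while at least one
           * block has useswcnt > 0, i.e. some filter somewhere still
           * wants software classification.
           */
          if (!static_branch_likely(&tcf_sw_enabled_key))
                  return true;

          /* Per-block gate: useswcnt == 0 means every tp in this block
           * holds only skipsw filters, so software can be skipped.
           */
          return block && !atomic_read(&block->useswcnt);
  }

Compared with the old scheme, which recomputed bypass_wanted from
filtercnt == skipswcnt under down_write(&block->cb_lock), this check
needs only an atomic read on the datapath and plain atomics at filter
insert/delete time.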
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/cls_api.c      57
-rw-r--r--  net/sched/cls_bpf.c       2
-rw-r--r--  net/sched/cls_flower.c    2
-rw-r--r--  net/sched/cls_matchall.c  2
-rw-r--r--  net/sched/cls_u32.c       4
5 files changed, 33 insertions, 34 deletions
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7578e27260c9..8e47e5355be6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->protocol = protocol;
tp->prio = prio;
tp->chain = chain;
+ tp->usesw = !tp->ops->reoffload;
spin_lock_init(&tp->lock);
refcount_set(&tp->refcnt, 1);
@@ -410,39 +411,31 @@ static void tcf_proto_get(struct tcf_proto *tp)
refcount_inc(&tp->refcnt);
}
-static void tcf_maintain_bypass(struct tcf_block *block)
+static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
{
- int filtercnt = atomic_read(&block->filtercnt);
- int skipswcnt = atomic_read(&block->skipswcnt);
- bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
-
- if (bypass_wanted != block->bypass_wanted) {
#ifdef CONFIG_NET_CLS_ACT
- if (bypass_wanted)
- static_branch_inc(&tcf_bypass_check_needed_key);
- else
- static_branch_dec(&tcf_bypass_check_needed_key);
-#endif
- block->bypass_wanted = bypass_wanted;
+ struct tcf_block *block = tp->chain->block;
+ bool counted = false;
+
+ if (!add) {
+ if (tp->usesw && tp->counted) {
+ if (!atomic_dec_return(&block->useswcnt))
+ static_branch_dec(&tcf_sw_enabled_key);
+ tp->counted = false;
+ }
+ return;
}
-}
-
-static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
-{
- lockdep_assert_not_held(&block->cb_lock);
- down_write(&block->cb_lock);
- if (*counted != add) {
- if (add) {
- atomic_inc(&block->filtercnt);
- *counted = true;
- } else {
- atomic_dec(&block->filtercnt);
- *counted = false;
- }
+ spin_lock(&tp->lock);
+ if (tp->usesw && !tp->counted) {
+ counted = true;
+ tp->counted = true;
}
- tcf_maintain_bypass(block);
- up_write(&block->cb_lock);
+ spin_unlock(&tp->lock);
+
+ if (counted && atomic_inc_return(&block->useswcnt) == 1)
+ static_branch_inc(&tcf_sw_enabled_key);
+#endif
}
static void tcf_chain_put(struct tcf_chain *chain);
@@ -451,7 +444,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
bool sig_destroy, struct netlink_ext_ack *extack)
{
tp->ops->destroy(tp, rtnl_held, extack);
- tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
+ tcf_proto_count_usesw(tp, false);
if (sig_destroy)
tcf_proto_signal_destroyed(tp->chain, tp);
tcf_chain_put(tp->chain);
@@ -2409,7 +2402,7 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held, extack);
tfilter_put(tp, fh);
- tcf_block_filter_cnt_update(block, &tp->counted, true);
+ tcf_proto_count_usesw(tp, true);
/* q pointer is NULL for shared blocks */
if (q)
q->flags &= ~TCQ_F_CAN_BYPASS;
@@ -3532,8 +3525,6 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
if (*flags & TCA_CLS_FLAGS_IN_HW)
return;
*flags |= TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_inc(&block->skipswcnt);
atomic_inc(&block->offloadcnt);
}
@@ -3542,8 +3533,6 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
if (!(*flags & TCA_CLS_FLAGS_IN_HW))
return;
*flags &= ~TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_dec(&block->skipswcnt);
atomic_dec(&block->offloadcnt);
}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1941ebec23ff..7fbe42f0e5c2 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, prog->gen_flags);
+
if (oldprog) {
idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 1008ec8a464c..03505673d523 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2503,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, fnew->flags);
+
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 9f1e62ca508d..f03bf5da39ee 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
*arg = head;
rcu_assign_pointer(tp->root, new);
return 0;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d3a03c57545b..2a1c00048fd6 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -951,6 +951,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
tcf_exts_get_net(&n->exts);
@@ -1164,6 +1166,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(n->flags))
n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, n->flags);
+
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))