author	Ahmed S. Darwish <a.darwish@linutronix.de>	2021-10-16 10:49:07 +0200
committer	David S. Miller <davem@davemloft.net>	2021-10-18 12:54:41 +0100
commit	67c9e6270f3013e4d86ec57c4e7f27459f2a0652 (patch)
tree	69cab1b15c62cd3dd449820ceebdd18107114c68 /net/core
parent	f2efdb17928924c9c935c136dea764a081032006 (diff)
net: sched: Protect Qdisc::bstats with u64_stats
The not-per-CPU variant of qdisc tc (traffic control) statistics, Qdisc::gnet_stats_basic_packed bstats, is protected with Qdisc::running sequence counter.

This sequence counter is used for reliably protecting bstats reads from parallel writes. Meanwhile, the seqcount's write section covers a much wider area than bstats update: qdisc_run_begin() => qdisc_run_end(). That read/write section asymmetry can lead to needless retries of the read section.

To prepare for removing the Qdisc::running sequence counter altogether, introduce a u64_stats sync point inside bstats instead. Modify _bstats_update() to start/end the bstats u64_stats write section.

For bisectability, and finer commits granularity, the bstats read section is still protected with a Qdisc::running read/retry loop and qdisc_run_begin/end() still starts/ends that seqcount write section. Once all call sites are modified to use _bstats_update(), the Qdisc::running seqcount will be removed and bstats read/retry loop will be modified to utilize the internal u64_stats sync point.

Note, using u64_stats implies no sequence counter protection for 64-bit architectures. This can lead to the statistics "packets" vs. "bytes" values getting out of sync on rare occasions. The individual values will still be valid.

[bigeasy: Minor commit message edits, init all gnet_stats_basic_packed.]

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
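[Editor's note: the _bstats_update() change described above lives in include/net/sch_generic.h and so falls outside the net/core diffstat below. A minimal sketch of what the commit message describes, assuming the gnet_stats_basic_packed fields bytes and packets plus the u64_stats_sync member (syncp) introduced by this series:

	static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
					  __u64 bytes, __u32 packets)
	{
		/* Opens a seqcount write section on 32-bit SMP; a no-op on
		 * 64-bit, hence the possible (harmless) "packets" vs.
		 * "bytes" skew noted in the commit message.
		 */
		u64_stats_update_begin(&bstats->syncp);
		bstats->bytes += bytes;
		bstats->packets += packets;
		u64_stats_update_end(&bstats->syncp);
	}

]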
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/gen_estimator.c	2
-rw-r--r--	net/core/gen_stats.c	14
2 files changed, 13 insertions, 3 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 205df8b5116e..64978e77368f 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -62,7 +62,7 @@ struct net_rate_estimator {
static void est_fetch_counters(struct net_rate_estimator *e,
struct gnet_stats_basic_packed *b)
{
- memset(b, 0, sizeof(*b));
+ gnet_stats_basic_packed_init(b);
if (e->stats_lock)
spin_lock(e->stats_lock);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 6ec11289140b..f2e12fe7112b 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -18,7 +18,7 @@
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
-
+#include <net/sch_generic.h>
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
@@ -114,6 +114,15 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
}
EXPORT_SYMBOL(gnet_stats_start_copy);
+/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
+void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b)
+{
+ b->bytes = 0;
+ b->packets = 0;
+ u64_stats_init(&b->syncp);
+}
+EXPORT_SYMBOL(gnet_stats_basic_packed_init);
+
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu)
{
@@ -167,8 +176,9 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_packed *b,
int type)
{
- struct gnet_stats_basic_packed bstats = {0};
+ struct gnet_stats_basic_packed bstats;
+ gnet_stats_basic_packed_init(&bstats);
gnet_stats_add_basic(running, &bstats, cpu, b);
if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
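[Editor's note: once all call sites go through _bstats_update(), the read side described in the commit message would retry on the internal sync point rather than Qdisc::running. A hedged sketch of such a reader, using the stock u64_stats fetch API on the same struct gnet_stats_basic_packed *b:

	unsigned int start;
	u64 bytes, packets;

	/* Retry the snapshot if a writer's u64_stats section ran
	 * concurrently (32-bit SMP only; compiles away on 64-bit).
	 */
	do {
		start = u64_stats_fetch_begin(&b->syncp);
		bytes = b->bytes;
		packets = b->packets;
	} while (u64_stats_fetch_retry(&b->syncp, start));

On 64-bit kernels the fetch/retry pair is a no-op, so the two counters are read without ordering guarantees relative to each other — the trade-off the commit message explicitly accepts.]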