path: root/include/linux/filter.h
author	Eric Dumazet <edumazet@google.com>	2021-10-26 14:41:33 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2021-10-27 11:13:52 -0700
commit	61a0abaee2092eee69e44fe60336aa2f5b578938 (patch)
tree	5ee181827d5064079aaa77ce0e3523a70daf1e86 /include/linux/filter.h
parent	d979617aa84d96acca44c2f5778892b4565e322f (diff)
bpf: Use u64_stats_t in struct bpf_prog_stats
Commit 316580b69d0a ("u64_stats: provide u64_stats_t type") fixed possible
load/store tearing on 64bit arches.

For instance the following C code

stats->nsecs += sched_clock() - start;

Could be rightfully implemented like this by a compiler, confusing concurrent
readers a lot:

stats->nsecs += sched_clock();
// arbitrary delay
stats->nsecs -= start;

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211026214133.3114279-4-eric.dumazet@gmail.com
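For context, a minimal reader-side sketch of the tear-free snapshot pattern the
u64_stats API provides. This is not part of the patch; the helper name
bpf_prog_stats_read is hypothetical and only assumes the u64_stats_fetch_begin(),
u64_stats_read() and u64_stats_fetch_retry() helpers from
include/linux/u64_stats_sync.h:

/*
 * Hypothetical reader-side sketch: snapshot one CPU's counters and
 * retry if a writer updated them under syncp while we were reading.
 */
static void bpf_prog_stats_read(const struct bpf_prog_stats *stats,
				u64 *cnt, u64 *nsecs, u64 *misses)
{
	unsigned int start;

	do {
		start   = u64_stats_fetch_begin(&stats->syncp);
		*cnt    = u64_stats_read(&stats->cnt);
		*nsecs  = u64_stats_read(&stats->nsecs);
		*misses = u64_stats_read(&stats->misses);
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}

Roughly speaking, on 64bit arches the syncp seqcount compiles away and
u64_stats_read() is a single (non-torn) 64bit load, while on 32bit arches it is
the retry loop that guarantees a consistent snapshot.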
Diffstat (limited to 'include/linux/filter.h')
-rw-r--r--	include/linux/filter.h	| 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 2fffe9cc50f9..9782e3245852 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -553,9 +553,9 @@ struct bpf_binary_header {
 };
 
 struct bpf_prog_stats {
-	u64 cnt;
-	u64 nsecs;
-	u64 misses;
+	u64_stats_t cnt;
+	u64_stats_t nsecs;
+	u64_stats_t misses;
 	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
@@ -617,8 +617,8 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
 		stats = this_cpu_ptr(prog->stats);
 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
-		stats->cnt++;
-		stats->nsecs += sched_clock() - start;
+		u64_stats_inc(&stats->cnt);
+		u64_stats_add(&stats->nsecs, sched_clock() - start);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	} else {
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);