author    Jose Fernandez <josef@netflix.com>  2024-04-01 21:40:10 -0600
committer Daniel Borkmann <daniel@iogearbox.net>  2024-04-02 16:51:15 +0200
commit    ce09cbdd988887662546a1175bcfdfc6c8fdd150 (patch)
tree      13d5602cfc7f3f1463cb88f05adbecd2a6709b3c
parent    c186ed12a8ec498532d13de43094bdec9ac6f121 (diff)
bpf: Improve program stats run-time calculation
This patch improves the run-time calculation for program stats by
capturing the duration as soon as possible after the program returns.
Previously, the duration included u64_stats_t operations. While the
instrumentation overhead is part of the total time spent when stats are
enabled, distinguishing between the program's native execution time and
the time spent due to instrumentation is crucial for accurate
performance analysis.

By making this change, the patch facilitates more precise optimization
of BPF programs, enabling users to understand their performance in
environments without stats enabled.

I used a virtualized environment to measure the run-time over one
minute for a basic raw_tracepoint/sys_enter program, which just
increments a local counter. Although the virtualization introduced some
performance degradation that could affect the results, I observed
approximately a 16% decrease in average run-time reported by stats with
this change (310 -> 260 nsec).

Signed-off-by: Jose Fernandez <josef@netflix.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240402034010.25060-1-josef@netflix.com
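For reference, a minimal sketch of the kind of benchmark program the
commit message describes. The raw_tracepoint/sys_enter attach point is
taken from the message above; the program name, file name, and counter
layout are illustrative assumptions, not part of the patch.

// SPDX-License-Identifier: GPL-2.0
/* bench.bpf.c (hypothetical): a raw_tracepoint/sys_enter program that
 * only increments a counter, so the reported run-time is dominated by
 * fixed per-invocation overheads rather than by program work.
 * Build with: clang -O2 -g -target bpf -c bench.bpf.c -o bench.bpf.o
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __u64 counter;

SEC("raw_tracepoint/sys_enter")
int count_sys_enter(struct bpf_raw_tracepoint_args *ctx)
{
	/* Atomic add: the hook can fire concurrently on every CPU. */
	__sync_fetch_and_add(&counter, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Run-time stats must be switched on for such a measurement, e.g. with
sysctl -w kernel.bpf_stats_enabled=1; the average quoted above is
run_time_ns / run_cnt as reported by bpftool prog show.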
 include/linux/filter.h  | 6 ++++--
 kernel/bpf/trampoline.c | 3 ++-
 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 44934b968b57..531b36090122 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -654,14 +654,16 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 	cant_migrate();
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 		struct bpf_prog_stats *stats;
-		u64 start = sched_clock();
+		u64 duration, start = sched_clock();
 		unsigned long flags;
 
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+
+		duration = sched_clock() - start;
 		stats = this_cpu_ptr(prog->stats);
 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
 		u64_stats_inc(&stats->cnt);
-		u64_stats_add(&stats->nsecs, sched_clock() - start);
+		u64_stats_add(&stats->nsecs, duration);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	} else {
 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index cc50607f8d8c..26ae703d3c3b 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -885,12 +885,13 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
 	     * Hence check that 'start' is valid.
 	     */
 	    start > NO_START_TIME) {
+		u64 duration = sched_clock() - start;
 		unsigned long flags;
 
 		stats = this_cpu_ptr(prog->stats);
 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
 		u64_stats_inc(&stats->cnt);
-		u64_stats_add(&stats->nsecs, sched_clock() - start);
+		u64_stats_add(&stats->nsecs, duration);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	}
 }
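For completeness, a user-space sketch (not part of this patch) of how
the counters updated above can be read back with libbpf. The helper
name is illustrative, and prog_fd is assumed to refer to the loaded
program under test.

/* Enables run-time stats for the lifetime of stats_fd, then computes
 * the average native run-time per invocation that this patch makes
 * more precise. */
#include <stdio.h>
#include <bpf/bpf.h>

static void print_avg_runtime(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);

	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len) || !info.run_cnt)
		return;
	printf("avg: %llu nsec over %llu runs\n",
	       info.run_time_ns / info.run_cnt, info.run_cnt);
}

int main(void)
{
	/* Syscall-based alternative to the sysctl: stats stay enabled
	 * while the returned fd is held open. */
	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);

	if (stats_fd < 0)
		return 1;
	/* ... load and attach the program, let it run, then call
	 * print_avg_runtime() on its fd ... */
	return 0;
}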