author     Alexei Starovoitov <ast@kernel.org>   2025-07-03 19:31:30 -0700
committer  Alexei Starovoitov <ast@kernel.org>   2025-07-03 19:31:30 -0700
commit     03fe01ddd1d8be7799419ea5e5f228a0186ae8c2
tree       71b23ffbe8482b3edd886d34025022b164df8a01
parent     032547272eb0884470165e6a8fd73b80688e847f
parent     82bc4abf28d8147dd5da9ba52f0aa1bac23c125e
Merge branch 'bpf-reduce-verifier-stack-frame-size'
Yonghong Song says:
====================
bpf: Reduce verifier stack frame size
Arnd Bergmann reported an issue ([1]) where the clang compiler (older than
llvm18) may trigger an error because the stack frame size exceeds the limit.
I can reproduce the errors as below:
kernel/bpf/verifier.c:24491:5: error: stack frame size (2552) exceeds limit (1280) in 'bpf_check'
[-Werror,-Wframe-larger-than]
kernel/bpf/verifier.c:19921:12: error: stack frame size (1368) exceeds limit (1280) in 'do_check'
[-Werror,-Wframe-larger-than]
This patch series fixes the above two errors by reducing the stack size.
See the individual patches for details.
[1] https://lore.kernel.org/bpf/20250620113846.3950478-1-arnd@kernel.org/
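The fix pattern used throughout the series is to stop placing large scratch
objects on the stack of verifier helpers (which clang can inline into
bpf_check()/do_check()) and to reuse storage that already lives in the
long-lived struct bpf_verifier_env. A minimal sketch of that pattern, using
hypothetical stand-in types rather than the real verifier definitions:

  /* "callchain" stands in for struct bpf_scc_callchain, "env" for
   * struct bpf_verifier_env; names and sizes are illustrative only.
   */
  struct callchain {
          unsigned int ids[64];           /* ~256 bytes, enough to matter */
  };

  struct env {
          struct callchain callchain_buf; /* long-lived scratch storage */
  };

  /* Before: the whole object lands on the (possibly inlined) caller's frame. */
  static int helper_before(struct env *env)
  {
          struct callchain chain = {0};

          chain.ids[0] = 1;
          return (int)chain.ids[0];
  }

  /* After: the frame only holds a pointer into env. */
  static int helper_after(struct env *env)
  {
          struct callchain *chain = &env->callchain_buf;

          chain->ids[0] = 1;
          return (int)chain->ids[0];
  }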
Changelogs:
v2 -> v3:
- v2: https://lore.kernel.org/bpf/20250702171134.2370432-1-yonghong.song@linux.dev/
- Rename env->callchain to env->callchain_buf so it is clear that
  env->callchain_buf is used as a temporary buffer.
v1 -> v2:
- v1: https://lore.kernel.org/bpf/20250702053332.1991516-1-yonghong.song@linux.dev/
- Simplify assignment to struct bpf_insn pointer in do_misc_fixups().
- Restore the original implementation in opt_hard_wire_dead_code_branches()
  since it keeps only one insn on the stack.
- Avoid unnecessary insns for 64bit modulo (mod 0/-1) operations.
====================
Link: https://patch.msgid.link/20250703141101.1482025-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
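
On the verifier.c side, the series replaces on-stack "struct bpf_insn
patch[] = { ... }" arrays with writes through a pointer into the preallocated
env->insn_buf and derives the instruction count from pointer arithmetic
(cnt = patch - insn_buf). A short sketch of that idiom, again with
hypothetical stand-in types rather than the real struct bpf_insn and
struct bpf_verifier_env:

  #include <stddef.h>

  struct insn { int code; };              /* stand-in for struct bpf_insn */

  #define INSN_BUF_SIZE 32

  struct env { struct insn insn_buf[INSN_BUF_SIZE]; };

  /* Build a variable-length patch directly in env->insn_buf; the count comes
   * from pointer arithmetic instead of ARRAY_SIZE() on a local array.
   */
  static size_t build_patch(struct env *env, int need_fixup)
  {
          struct insn *patch = env->insn_buf;

          *patch++ = (struct insn){ .code = 1 };
          if (need_fixup)                 /* optional insn, emitted only when needed */
                  *patch++ = (struct insn){ .code = 2 };
          *patch++ = (struct insn){ .code = 3 };

          return (size_t)(patch - env->insn_buf);
  }

Building the patch incrementally is also what lets the rewritten modulo paths
append the two 32-bit fixup instructions only under "if (!is64)", instead of
declaring them unconditionally and trimming the count with
ARRAY_SIZE(...) - (is64 ? 2 : 0) as the old chk_and_mod/chk_and_smod arrays did.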
 include/linux/bpf_verifier.h |   1
 kernel/bpf/verifier.c        | 230
 2 files changed, 113 insertions(+), 118 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7e459e839f8b..94defa405c85 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -841,6 +841,7 @@ struct bpf_verifier_env {
 	char tmp_str_buf[TMP_STR_BUF_LEN];
 	struct bpf_insn insn_buf[INSN_BUF_SIZE];
 	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
+	struct bpf_scc_callchain callchain_buf;
 	/* array of pointers to bpf_scc_info indexed by SCC id */
 	struct bpf_scc_info **scc_info;
 	u32 scc_cnt;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 29e0dddc22f8..0f6cc2275695 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1914,19 +1914,19 @@ static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callc
  */
 static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return 0;
-	visit = scc_visit_lookup(env, &callchain);
-	visit = visit ?: scc_visit_alloc(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
+	visit = visit ?: scc_visit_alloc(env, callchain);
 	if (!visit)
 		return -ENOMEM;
 	if (!visit->entry_state) {
 		visit->entry_state = st;
 		if (env->log.level & BPF_LOG_LEVEL2)
-			verbose(env, "SCC enter %s\n", format_callchain(env, &callchain));
+			verbose(env, "SCC enter %s\n", format_callchain(env, callchain));
 	}
 	return 0;
 }
@@ -1939,21 +1939,21 @@ static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visi
  */
 static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return 0;
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit) {
 		verifier_bug(env, "scc exit: no visit info for call chain %s",
-			     format_callchain(env, &callchain));
+			     format_callchain(env, callchain));
 		return -EFAULT;
 	}
 	if (visit->entry_state != st)
 		return 0;
 	if (env->log.level & BPF_LOG_LEVEL2)
-		verbose(env, "SCC exit %s\n", format_callchain(env, &callchain));
+		verbose(env, "SCC exit %s\n", format_callchain(env, callchain));
 	visit->entry_state = NULL;
 	env->num_backedges -= visit->num_backedges;
 	visit->num_backedges = 0;
@@ -1968,22 +1968,22 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *st,
 			    struct bpf_scc_backedge *backedge)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain)) {
+	if (!compute_scc_callchain(env, st, callchain)) {
 		verifier_bug(env, "add backedge: no SCC in verification path, insn_idx %d",
 			     st->insn_idx);
 		return -EFAULT;
 	}
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit) {
 		verifier_bug(env, "add backedge: no visit info for call chain %s",
-			     format_callchain(env, &callchain));
+			     format_callchain(env, callchain));
 		return -EFAULT;
 	}
 	if (env->log.level & BPF_LOG_LEVEL2)
-		verbose(env, "SCC backedge %s\n", format_callchain(env, &callchain));
+		verbose(env, "SCC backedge %s\n", format_callchain(env, callchain));
 	backedge->next = visit->backedges;
 	visit->backedges = backedge;
 	visit->num_backedges++;
@@ -1999,12 +1999,12 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
 static bool incomplete_read_marks(struct bpf_verifier_env *env,
 				  struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return false;
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit)
 		return false;
 	return !!visit->backedges;
@@ -21011,7 +21011,10 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 					 const union bpf_attr *attr)
 {
-	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
+	struct bpf_insn *patch;
+	/* use env->insn_buf as two independent buffers */
+	struct bpf_insn *zext_patch = env->insn_buf;
+	struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
 	int i, patch_len, delta = 0, len = env->prog->len;
 	struct bpf_insn *insns = env->prog->insnsi;
@@ -21189,13 +21192,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		if (env->insn_aux_data[i + delta].nospec) {
 			WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state);
-			struct bpf_insn patch[] = {
-				BPF_ST_NOSPEC(),
-				*insn,
-			};
+			struct bpf_insn *patch = insn_buf;
 
-			cnt = ARRAY_SIZE(patch);
-			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+			*patch++ = BPF_ST_NOSPEC();
+			*patch++ = *insn;
+			cnt = patch - insn_buf;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
@@ -21263,13 +21265,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			/* nospec_result is only used to mitigate Spectre v4 and
 			 * to limit verification-time for Spectre v1.
 			 */
-			struct bpf_insn patch[] = {
-				*insn,
-				BPF_ST_NOSPEC(),
-			};
+			struct bpf_insn *patch = insn_buf;
 
-			cnt = ARRAY_SIZE(patch);
-			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+			*patch++ = *insn;
+			*patch++ = BPF_ST_NOSPEC();
+			cnt = patch - insn_buf;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
@@ -21939,13 +21940,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 	u16 stack_depth_extra = 0;
 
 	if (env->seen_exception && !env->exception_callback_subprog) {
-		struct bpf_insn patch[] = {
-			env->prog->insnsi[insn_cnt - 1],
-			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-			BPF_EXIT_INSN(),
-		};
+		struct bpf_insn *patch = insn_buf;
 
-		ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
+		*patch++ = env->prog->insnsi[insn_cnt - 1];
+		*patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+		*patch++ = BPF_EXIT_INSN();
+		ret = add_hidden_subprog(env, insn_buf, patch - insn_buf);
 		if (ret < 0)
 			return ret;
 		prog = env->prog;
@@ -21981,20 +21981,18 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		    insn->off == 1 && insn->imm == -1) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
-			struct bpf_insn *patchlet;
-			struct bpf_insn chk_and_sdiv[] = {
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_NEG | BPF_K, insn->dst_reg,
-					     0, 0, 0),
-			};
-			struct bpf_insn chk_and_smod[] = {
-				BPF_MOV32_IMM(insn->dst_reg, 0),
-			};
+			struct bpf_insn *patch = insn_buf;
+
+			if (isdiv)
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_NEG | BPF_K, insn->dst_reg,
+							0, 0, 0);
+			else
+				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
 
-			patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
-			cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+			cnt = patch - insn_buf;
 
-			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
@@ -22013,83 +22011,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
 			bool is_sdiv = isdiv && insn->off == 1;
 			bool is_smod = !isdiv && insn->off == 1;
-			struct bpf_insn *patchlet;
-			struct bpf_insn chk_and_div[] = {
-				/* [R,W]x div 0 -> 0 */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JNE | BPF_K, insn->src_reg,
-					     0, 2, 0),
-				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-			};
-			struct bpf_insn chk_and_mod[] = {
-				/* [R,W]x mod 0 -> [R,W]x */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, insn->src_reg,
-					     0, 1 + (is64 ? 0 : 1), 0),
-				*insn,
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-			};
-			struct bpf_insn chk_and_sdiv[] = {
+			struct bpf_insn *patch = insn_buf;
+
+			if (is_sdiv) {
 				/* [R,W]x sdiv 0 -> 0
 				 * LLONG_MIN sdiv -1 -> LLONG_MIN
 				 * INT_MIN sdiv -1 -> INT_MIN
 				 */
-				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_ADD | BPF_K, BPF_REG_AX,
-					     0, 0, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JGT | BPF_K, BPF_REG_AX,
-					     0, 4, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, BPF_REG_AX,
-					     0, 1, 0),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_MOV | BPF_K, insn->dst_reg,
-					     0, 0, 0),
+				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_ADD | BPF_K, BPF_REG_AX,
+							0, 0, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JGT | BPF_K, BPF_REG_AX,
+							0, 4, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, BPF_REG_AX,
+							0, 1, 0);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_MOV | BPF_K, insn->dst_reg,
+							0, 0, 0);
 				/* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_NEG | BPF_K, insn->dst_reg,
-					     0, 0, 0),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-			};
-			struct bpf_insn chk_and_smod[] = {
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_NEG | BPF_K, insn->dst_reg,
+							0, 0, 0);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+				cnt = patch - insn_buf;
+			} else if (is_smod) {
 				/* [R,W]x mod 0 -> [R,W]x */
 				/* [R,W]x mod -1 -> 0 */
-				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_ADD | BPF_K, BPF_REG_AX,
-					     0, 0, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JGT | BPF_K, BPF_REG_AX,
-					     0, 3, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, BPF_REG_AX,
-					     0, 3 + (is64 ? 0 : 1), 1),
-				BPF_MOV32_IMM(insn->dst_reg, 0),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-			};
-
-			if (is_sdiv) {
-				patchlet = chk_and_sdiv;
-				cnt = ARRAY_SIZE(chk_and_sdiv);
-			} else if (is_smod) {
-				patchlet = chk_and_smod;
-				cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_ADD | BPF_K, BPF_REG_AX,
+							0, 0, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JGT | BPF_K, BPF_REG_AX,
+							0, 3, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, BPF_REG_AX,
+							0, 3 + (is64 ? 0 : 1), 1);
+				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+
+				if (!is64) {
+					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+				}
+				cnt = patch - insn_buf;
+			} else if (isdiv) {
+				/* [R,W]x div 0 -> 0 */
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JNE | BPF_K, insn->src_reg,
+							0, 2, 0);
+				*patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+				cnt = patch - insn_buf;
 			} else {
-				patchlet = isdiv ? chk_and_div : chk_and_mod;
-				cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-					      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+				/* [R,W]x mod 0 -> [R,W]x */
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, insn->src_reg,
+							0, 1 + (is64 ? 0 : 1), 0);
+				*patch++ = *insn;
+
+				if (!is64) {
+					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+				}
+				cnt = patch - insn_buf;
 			}
 
-			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
@@ -22103,7 +22097,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
 		     BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
-			struct bpf_insn *patch = &insn_buf[0];
+			struct bpf_insn *patch = insn_buf;
 			u64 uaddress_limit = bpf_arch_uaddress_limit();
 
 			if (!uaddress_limit)
@@ -22154,7 +22148,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
-			struct bpf_insn *patch = &insn_buf[0];
+			struct bpf_insn *patch = insn_buf;
 			bool issrc, isneg, isimm;
 			u32 off_reg;
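
A side note on the rewritten div/mod hunks above: the emitted guard sequences
keep BPF's defined edge-case semantics, per the comments carried over in the
diff ([R,W]x div 0 -> 0, [R,W]x mod 0 -> [R,W]x, [R,W]x mod -1 -> 0 for the
signed case, and LLONG_MIN sdiv -1 -> LLONG_MIN). A plain-C reference model of
the signed 64-bit case, not kernel code and with illustrative function names:

  #include <limits.h>

  /* What the patched-in guards compute before falling through to the real
   * division/modulo instruction.
   */
  static long long ref_sdiv64(long long dst, long long src)
  {
          if (src == 0)
                  return 0;               /* Rx sdiv 0 -> 0 */
          if (src == -1)                  /* sdiv by -1 is a negation; BPF_NEG(LLONG_MIN) stays LLONG_MIN */
                  return dst == LLONG_MIN ? LLONG_MIN : -dst;
          return dst / src;
  }

  static long long ref_smod64(long long dst, long long src)
  {
          if (src == 0)
                  return dst;             /* Rx smod 0 -> Rx */
          if (src == -1)
                  return 0;               /* Rx smod -1 -> 0 */
          return dst % src;
  }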