author    Pu Lehui <pulehui@huawei.com>    2025-07-19 09:17:28 +0000
committer Daniel Borkmann <daniel@iogearbox.net>    2025-08-15 10:46:51 +0200
commit    b18f4aae6a5db5ada5aba02b43dbdd3909e5f87c (patch)
tree      453231e4a5146a2fa0b4d5484aa966d4539a0073
parent    1c0196b878a69927449d649163388ad63cafcacb (diff)
riscv, bpf: Add ex_insn_off and ex_jmp_off for exception table handling
Add ex_insn_off and ex_jmp_off fields to struct rv_jit_context so that
add_exception_handler() no longer needs to immediately follow the
instruction for which the exception table entry is added. ex_insn_off
records the offset of the instruction that needs the exception table
entry, and ex_jmp_off records the offset to jump to past the faulting
instruction. This prepares for adding exception table support to atomic
instructions later, since some atomic instructions need to perform zext
or other operations, meaning the faulting instruction is no longer the
last one emitted.

Signed-off-by: Pu Lehui <pulehui@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
Acked-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/bpf/20250719091730.2660197-9-pulehui@huaweicloud.com
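For orientation, here is a minimal, self-contained sketch of the emitter-side contract introduced by this patch, distilled from the hunks below. The struct carries only the relevant fields, and emit_probe_load() is a hypothetical stand-in, not a helper from the patch:

/* Simplified model of the new contract. Offsets count 16-bit
 * instruction units, matching ctx->ninsns in the RISC-V JIT.
 */
struct rv_jit_context {
	int ninsns;      /* instruction units emitted so far */
	int ex_insn_off; /* offset of the instruction that may fault */
	int ex_jmp_off;  /* offset to resume at once the fault is handled */
};

static void emit_probe_load(struct rv_jit_context *ctx)
{
	ctx->ex_insn_off = ctx->ninsns; /* mark the potentially faulting insn */
	ctx->ninsns += 2;               /* pretend a 32-bit load was emitted */
	ctx->ex_jmp_off = ctx->ninsns;  /* fixup resumes just past it */
}

Because the two offsets are recorded independently, the resume point no longer has to be the instruction immediately after the faulting one, which is exactly the freedom the planned atomic-instruction support needs.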
-rw-r--r--  arch/riscv/net/bpf_jit.h        |  2
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 84
2 files changed, 39 insertions(+), 47 deletions(-)
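On the consumer side, add_exception_handler() now reads the recorded offsets instead of taking an insn_len argument from every caller. A minimal sketch of that flow, reusing the stand-in context above and eliding the real extable bookkeeping:

static int add_exception_handler(struct rv_jit_context *ctx)
{
	/* Nothing recorded: this instruction needs no extable entry. */
	if (ctx->ex_insn_off <= 0 || ctx->ex_jmp_off <= 0)
		return 0;

	/* ... derive pc from &ro_insns[ctx->ex_insn_off] and the fixup
	 * target from &ro_insns[ctx->ex_jmp_off], then fill the entry
	 * (see the add_exception_handler() hunks below) ...
	 */

	/* Consume the offsets so a stale pair cannot attach an extable
	 * entry to the next instruction by accident.
	 */
	ctx->ex_insn_off = 0;
	ctx->ex_jmp_off = 0;
	return 0;
}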
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 0790f40b7e9d..be2915444ce5 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -78,6 +78,8 @@ struct rv_jit_context {
int epilogue_offset;
int *offset; /* BPF to RV */
int nexentries;
+ int ex_insn_off;
+ int ex_jmp_off;
unsigned long flags;
int stack_size;
u64 arena_vm_start;
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 8e813809d305..56b592af53a6 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -511,57 +511,54 @@ static void emit_stx_insn(u8 rd, s16 off, u8 rs, u8 size, struct rv_jit_context
}
}
-static int emit_ldx(u8 rd, s16 off, u8 rs, u8 size, bool sign_ext,
+static void emit_ldx(u8 rd, s16 off, u8 rs, u8 size, bool sign_ext,
struct rv_jit_context *ctx)
{
- int insns_start;
-
if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_ldx_insn(rd, off, rs, size, sign_ext, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
+ return;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_ldx_insn(rd, 0, RV_REG_T1, size, sign_ext, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
}
-static int emit_st(u8 rd, s16 off, s32 imm, u8 size, struct rv_jit_context *ctx)
+static void emit_st(u8 rd, s16 off, s32 imm, u8 size, struct rv_jit_context *ctx)
{
- int insns_start;
-
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_stx_insn(rd, off, RV_REG_T1, size, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
+ return;
}
emit_imm(RV_REG_T2, off, ctx);
emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_stx_insn(RV_REG_T2, 0, RV_REG_T1, size, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
}
-static int emit_stx(u8 rd, s16 off, u8 rs, u8 size, struct rv_jit_context *ctx)
+static void emit_stx(u8 rd, s16 off, u8 rs, u8 size, struct rv_jit_context *ctx)
{
- int insns_start;
-
if (is_12b_int(off)) {
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_stx_insn(rd, off, rs, size, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
+ return;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
- insns_start = ctx->ninsns;
+ ctx->ex_insn_off = ctx->ninsns;
emit_stx_insn(RV_REG_T1, 0, rs, size, ctx);
- return ctx->ninsns - insns_start;
+ ctx->ex_jmp_off = ctx->ninsns;
}
static int emit_atomic_ld_st(u8 rd, u8 rs, const struct bpf_insn *insn,
@@ -700,9 +697,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
}
/* For accesses to BTF pointers, add an entry to the exception table */
-static int add_exception_handler(const struct bpf_insn *insn,
- struct rv_jit_context *ctx,
- int dst_reg, int insn_len)
+static int add_exception_handler(const struct bpf_insn *insn, int dst_reg,
+ struct rv_jit_context *ctx)
{
struct exception_table_entry *ex;
unsigned long pc;
@@ -710,21 +706,22 @@ static int add_exception_handler(const struct bpf_insn *insn,
off_t fixup_offset;
if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
- (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
- BPF_MODE(insn->code) != BPF_PROBE_MEM32))
+ ctx->ex_insn_off <= 0 || ctx->ex_jmp_off <= 0)
return 0;
- if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
- return -EINVAL;
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+ return 0;
- if (WARN_ON_ONCE(insn_len > ctx->ninsns))
+ if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
return -EINVAL;
- if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
+ if (WARN_ON_ONCE(ctx->ex_insn_off > ctx->ninsns || ctx->ex_jmp_off > ctx->ninsns))
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->nexentries];
- pc = (unsigned long)&ctx->ro_insns[ctx->ninsns - insn_len];
+ pc = (unsigned long)&ctx->ro_insns[ctx->ex_insn_off];
/*
* This is the relative offset of the instruction that may fault from
@@ -748,7 +745,7 @@ static int add_exception_handler(const struct bpf_insn *insn,
* that may fault. The execution will jump to this after handling the
* fault.
*/
- fixup_offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
+ fixup_offset = (long)&ex->fixup - (long)&ctx->ro_insns[ctx->ex_jmp_off];
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
@@ -765,6 +762,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
+ ctx->ex_insn_off = 0;
+ ctx->ex_jmp_off = 0;
ctx->nexentries++;
return 0;
}
@@ -1774,7 +1773,6 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
{
bool sign_ext;
- int insn_len;
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
@@ -1784,9 +1782,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
rs = RV_REG_T2;
}
- insn_len = emit_ldx(rd, off, rs, BPF_SIZE(code), sign_ext, ctx);
+ emit_ldx(rd, off, rs, BPF_SIZE(code), sign_ext, ctx);
- ret = add_exception_handler(insn, ctx, rd, insn_len);
+ ret = add_exception_handler(insn, rd, ctx);
if (ret)
return ret;
@@ -1809,21 +1807,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
- {
- int insn_len;
-
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
emit_add(RV_REG_T3, rd, RV_REG_ARENA, ctx);
rd = RV_REG_T3;
}
- insn_len = emit_st(rd, off, imm, BPF_SIZE(code), ctx);
+ emit_st(rd, off, imm, BPF_SIZE(code), ctx);
- ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER, insn_len);
+ ret = add_exception_handler(insn, REG_DONT_CLEAR_MARKER, ctx);
if (ret)
return ret;
break;
- }
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
@@ -1835,21 +1829,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
- {
- int insn_len;
-
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
emit_add(RV_REG_T2, rd, RV_REG_ARENA, ctx);
rd = RV_REG_T2;
}
- insn_len = emit_stx(rd, off, rs, BPF_SIZE(code), ctx);
+ emit_stx(rd, off, rs, BPF_SIZE(code), ctx);
- ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER, insn_len);
+ ret = add_exception_handler(insn, REG_DONT_CLEAR_MARKER, ctx);
if (ret)
return ret;
break;
- }
case BPF_STX | BPF_ATOMIC | BPF_B:
case BPF_STX | BPF_ATOMIC | BPF_H: