Diffstat (limited to 'arch/loongarch/net/bpf_jit.c')
-rw-r--r-- | arch/loongarch/net/bpf_jit.c | 86
1 file changed, 61 insertions, 25 deletions
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index abfdb6bb5c38..cbe53d0b7fb0 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -527,13 +527,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 			emit_zext_32(ctx, dst, is32);
 			break;
 		case 8:
-			move_reg(ctx, t1, src);
-			emit_insn(ctx, extwb, dst, t1);
+			emit_insn(ctx, extwb, dst, src);
 			emit_zext_32(ctx, dst, is32);
 			break;
 		case 16:
-			move_reg(ctx, t1, src);
-			emit_insn(ctx, extwh, dst, t1);
+			emit_insn(ctx, extwh, dst, src);
 			emit_zext_32(ctx, dst, is32);
 			break;
 		case 32:
@@ -1294,8 +1292,10 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
 	u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
 
-	if (!is_kernel_text((unsigned long)ip) &&
-	    !is_bpf_text_address((unsigned long)ip))
+	/* Only poking bpf text is supported. Since kernel function entry
+	 * is set up by ftrace, we rely on ftrace to poke kernel functions.
+	 */
+	if (!is_bpf_text_address((unsigned long)ip))
 		return -ENOTSUPP;
 
 	ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
@@ -1448,12 +1448,43 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
 	bpf_prog_pack_free(image, size);
 }
 
+/*
+ * Sign-extend the register if necessary
+ */
+static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
+{
+	/* ABI requires unsigned char/short to be zero-extended */
+	if (!sign && (size == 1 || size == 2)) {
+		if (rd != rj)
+			move_reg(ctx, rd, rj);
+		return;
+	}
+
+	switch (size) {
+	case 1:
+		emit_insn(ctx, extwb, rd, rj);
+		break;
+	case 2:
+		emit_insn(ctx, extwh, rd, rj);
+		break;
+	case 4:
+		emit_insn(ctx, addiw, rd, rj, 0);
+		break;
+	case 8:
+		if (rd != rj)
+			move_reg(ctx, rd, rj);
+		break;
+	default:
+		pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
+	}
+}
+
 static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 					 const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
 					 void *func_addr, u32 flags)
 {
 	int i, ret, save_ret;
-	int stack_size = 0, nargs = 0;
+	int stack_size, nargs;
 	int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
 	bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
 	void *orig_call = func_addr;
@@ -1462,9 +1493,6 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
 	u32 **branches = NULL;
 
-	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
-		return -ENOTSUPP;
-
 	/*
 	 * FP + 8           [ RA to parent func ] return address to parent
 	 *                                        function
@@ -1495,20 +1523,23 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 	if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
 		return -ENOTSUPP;
 
+	/* FIXME: No support of struct argument */
+	for (i = 0; i < m->nr_args; i++) {
+		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+			return -ENOTSUPP;
+	}
+
 	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
 		return -ENOTSUPP;
 
-	stack_size = 0;
-
 	/* Room of trampoline frame to store return address and frame pointer */
-	stack_size += 16;
+	stack_size = 16;
 
 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
-	if (save_ret) {
-		/* Save BPF R0 and A0 */
-		stack_size += 16;
-		retval_off = stack_size;
-	}
+	if (save_ret)
+		stack_size += 16; /* Save BPF R0 and A0 */
+
+	retval_off = stack_size;
 
 	/* Room of trampoline frame to store args */
 	nargs = m->nr_args;
@@ -1595,7 +1626,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 		orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
 		ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
 		if (ret)
 			return ret;
@@ -1645,7 +1676,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		im->ip_epilogue = ctx->ro_image + ctx->idx;
-		move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
 		ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
 		if (ret)
 			goto out;
@@ -1655,8 +1686,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 		restore_args(ctx, m->nr_args, args_off);
 
 	if (save_ret) {
-		emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
 		emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+		if (is_struct_ops)
+			sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
+				    m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
+		else
+			emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
 	}
 
 	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
@@ -1715,7 +1750,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 	jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
 
 	ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
-	if (ret > 0 && validate_code(&ctx) < 0) {
+	if (ret < 0)
+		goto out;
+
+	if (validate_code(&ctx) < 0) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1726,7 +1764,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 		goto out;
 	}
 
-	bpf_flush_icache(ro_image, ro_image_end);
 out:
 	kvfree(image);
 	return ret < 0 ? ret : size;
@@ -1744,8 +1781,7 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 
 	ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
 
-	/* Page align */
-	return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
+	return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
 }
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)