Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c'):

 arch/arm64/net/bpf_jit_comp.c | 51 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c870d6f01ac2..ba38d403abb2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -70,7 +70,8 @@ struct jit_ctx {
int idx;
int epilogue_offset;
int *offset;
- u32 *image;
+ __le32 *image;
+ u32 stack_size;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@ -130,7 +131,7 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
static void jit_fill_hole(void *area, unsigned int size)
{
- u32 *ptr;
+ __le32 *ptr;
/* We are guaranteed to have aligned memory. */
for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
@@ -147,16 +148,11 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-#define _STACK_SIZE \
- (MAX_BPF_STACK \
- + 4 /* extra for skb_copy_bits buffer */)
-
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
-
#define PROLOGUE_OFFSET 8
static int build_prologue(struct jit_ctx *ctx)
{
+ const struct bpf_prog *prog = ctx->prog;
const u8 r6 = bpf2a64[BPF_REG_6];
const u8 r7 = bpf2a64[BPF_REG_7];
const u8 r8 = bpf2a64[BPF_REG_8];
@@ -178,9 +174,9 @@ static int build_prologue(struct jit_ctx *ctx)
* | |
* | ... | BPF prog stack
* | |
- * +-----+ <= (BPF_FP - MAX_BPF_STACK)
+ * +-----+ <= (BPF_FP - prog->aux->stack_depth)
* |RSVD | JIT scratchpad
- * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
+ * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
* | |
* | ... | Function call stack
* | |
@@ -204,8 +200,12 @@ static int build_prologue(struct jit_ctx *ctx)
/* Initialize tail_call_cnt */
emit(A64_MOVZ(1, tcc, 0, 0), ctx);
+ /* 4 byte extra for skb_copy_bits buffer */
+ ctx->stack_size = prog->aux->stack_depth + 4;
+ ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
/* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
cur_offset = ctx->idx - idx0;
if (cur_offset != PROLOGUE_OFFSET) {
@@ -290,7 +290,7 @@ static void build_epilogue(struct jit_ctx *ctx)
const u8 fp = bpf2a64[BPF_REG_FP];
/* We're done with BPF stack */
- emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -527,10 +527,14 @@ emit_bswap_uxt:
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -542,9 +546,15 @@ emit_cond_jmp:
case BPF_JGT:
jmp_cond = A64_COND_HI;
break;
+ case BPF_JLT:
+ jmp_cond = A64_COND_CC;
+ break;
case BPF_JGE:
jmp_cond = A64_COND_CS;
break;
+ case BPF_JLE:
+ jmp_cond = A64_COND_LS;
+ break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = A64_COND_NE;
@@ -552,9 +562,15 @@ emit_cond_jmp:
case BPF_JSGT:
jmp_cond = A64_COND_GT;
break;
+ case BPF_JSLT:
+ jmp_cond = A64_COND_LT;
+ break;
case BPF_JSGE:
jmp_cond = A64_COND_GE;
break;
+ case BPF_JSLE:
+ jmp_cond = A64_COND_LE;
+ break;
default:
return -EFAULT;
}
@@ -566,10 +582,14 @@ emit_cond_jmp:
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_CMP(1, dst, tmp), ctx);
goto emit_cond_jmp;
@@ -589,7 +609,7 @@ emit_cond_jmp:
break;
}
/* tail call */
- case BPF_JMP | BPF_CALL | BPF_X:
+ case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(ctx))
return -EFAULT;
break;
@@ -735,7 +755,7 @@ emit_cond_jmp:
return -EINVAL;
}
emit_a64_mov_i64(r3, size, ctx);
- emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
+ emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
emit(A64_BLR(r5), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
@@ -874,7 +894,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* 2. Now, the actual pass. */
- ctx.image = (u32 *)image_ptr;
+ ctx.image = (__le32 *)image_ptr;
ctx.idx = 0;
build_prologue(&ctx);
@@ -903,6 +923,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)ctx.image;
prog->jited = 1;
+ prog->jited_len = image_size;
out_off:
kfree(ctx.offset);
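
For reference, a minimal standalone sketch of the stack sizing this diff introduces in build_prologue(): the verifier-tracked prog->aux->stack_depth plus 4 bytes of skb_copy_bits scratch, rounded up to a 16-byte multiple by STACK_ALIGN(). The stack_depth value and the main() harness below are illustrative only and not part of the kernel code:

    #include <stdio.h>

    /* Same rounding as the JIT's STACK_ALIGN(): round up to a multiple of 16 bytes. */
    #define STACK_ALIGN(sz) (((sz) + 15) & ~15)

    int main(void)
    {
            unsigned int stack_depth = 40;  /* example verifier-tracked depth (hypothetical) */
            unsigned int stack_size;

            /* 4 byte extra for the skb_copy_bits buffer, as in build_prologue() */
            stack_size = stack_depth + 4;
            stack_size = STACK_ALIGN(stack_size);

            printf("stack_size = %u\n", stack_size);  /* 44 rounded up to 48 */
            return 0;
    }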