Diffstat (limited to 'arch/powerpc/net/bpf_jit_comp64.c')
 arch/powerpc/net/bpf_jit_comp64.c | 1077 ++++++++++++++++----------------
 1 file changed, 524 insertions(+), 553 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index be3517ef0574..79f23974a320 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -15,30 +15,63 @@
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
+#include <asm/security_features.h>
-#include "bpf_jit64.h"
+#include "bpf_jit.h"
-static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
-{
- memset32(area, BREAKPOINT_INSTRUCTION, size/4);
-}
+/*
+ * Stack layout:
+ * Ensure the top half (up to local_tmp_var) stays consistent
+ * with our redzone usage.
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 5*8 |
+ * [ tail_call_cnt ] 8 |
+ * [ local_tmp_var ] 16 |
+ * fp (r31) --> [ ebpf stack space ] up to 512 |
+ * [ frame header ] 32/112 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
-static inline void bpf_flush_icache(void *start, void *end)
-{
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
+/* for non-volatile GPRs BPF_REG_6 to BPF_REG_10 */
+#define BPF_PPC_STACK_SAVE (5*8)
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS 24
+/* stack frame excluding BPF stack, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
+ BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
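
A minimal userspace sketch of the same arithmetic, assuming the ELFv2 value STACK_FRAME_MIN_SIZE = 32 (illustrative, not kernel code), shows both the framed case and the red-zone case that bpf_jit_stack_local() switches between:

	#include <stdio.h>

	#define STACK_FRAME_MIN_SIZE	32	/* assumption: ELFv2 frame header */
	#define BPF_PPC_STACK_SAVE	(5 * 8)
	#define BPF_PPC_STACK_LOCALS	24	/* 16B local_tmp_var + 8B tail_call_cnt */
	#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
					 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

	/* mirrors bpf_jit_stack_local(): locals sit above the BPF stack when a
	 * frame exists, otherwise in the red zone below the caller's r1 */
	static int stack_local(int has_frame, int stack_size)
	{
		return has_frame ? STACK_FRAME_MIN_SIZE + stack_size
				 : -(BPF_PPC_STACK_SAVE + 24);
	}

	int main(void)
	{
		printf("frame size w/ 512B BPF stack: %d\n", BPF_PPC_STACKFRAME + 512);
		printf("local_tmp_var (no frame):     %d\n", stack_local(0, 0));
		printf("tail_call_cnt (no frame):     %d\n", stack_local(0, 0) + 16);
		return 0;
	}

The -64 this prints for local_tmp_var is why the bpf_stf_barrier fallback further down can hardcode std 21,-64(1): it spills into that same red-zone slot.
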
-static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
-{
- return (ctx->seen & (1 << (31 - b2p[i])));
-}
+/* BPF register usage */
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
-static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+/* BPF to ppc register mappings */
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
- ctx->seen |= (1 << (31 - b2p[i]));
+ /* function return value */
+ ctx->b2p[BPF_REG_0] = _R8;
+ /* function arguments */
+ ctx->b2p[BPF_REG_1] = _R3;
+ ctx->b2p[BPF_REG_2] = _R4;
+ ctx->b2p[BPF_REG_3] = _R5;
+ ctx->b2p[BPF_REG_4] = _R6;
+ ctx->b2p[BPF_REG_5] = _R7;
+ /* non-volatile registers */
+ ctx->b2p[BPF_REG_6] = _R27;
+ ctx->b2p[BPF_REG_7] = _R28;
+ ctx->b2p[BPF_REG_8] = _R29;
+ ctx->b2p[BPF_REG_9] = _R30;
+ /* frame pointer aka BPF_REG_10 */
+ ctx->b2p[BPF_REG_FP] = _R31;
+ /* eBPF jit internal registers */
+ ctx->b2p[BPF_REG_AX] = _R12;
+ ctx->b2p[TMP_REG_1] = _R9;
+ ctx->b2p[TMP_REG_2] = _R10;
}
+/* PPC NVR range -- update this if we ever use NVRs below r27 */
+#define BPF_PPC_NVR_MIN _R27
+
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
@@ -47,7 +80,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
- return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
+ return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
/*
@@ -56,9 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ nv gpr save area ] 6*8
+ * [ nv gpr save area ] 5*8
* [ tail_call_cnt ] 8
- * [ local_tmp_var ] 8
+ * [ local_tmp_var ] 16
* [ unused red zone ] 208 bytes protected
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -66,12 +99,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
if (bpf_has_stack_frame(ctx))
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
else
- return -(BPF_PPC_STACK_SAVE + 16);
+ return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
- return bpf_jit_stack_local(ctx) + 8;
+ return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -85,37 +118,44 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
BUG();
}
-static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_realloc_regs(struct codegen_context *ctx)
+{
+}
+
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
+#ifndef CONFIG_PPC_KERNEL_PCREL
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
+#endif
+
/*
* Initialize tail_call_cnt if we do tail calls.
* Otherwise, put in NOPs so that it can be skipped when we are
* invoked through a tail call.
*/
if (ctx->seen & SEEN_TAILCALL) {
- PPC_LI(b2p[TMP_REG_1], 0);
+ EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
/* this goes in the redzone */
- PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
} else {
- PPC_NOP();
- PPC_NOP();
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
}
-#define BPF_TAILCALL_PROLOGUE_SIZE 8
-
if (bpf_has_stack_frame(ctx)) {
/*
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
- PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
+ EMIT(PPC_RAW_MFLR(_R0));
+ EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
}
- PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
+ EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
}
/*
@@ -124,13 +164,13 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* in the protected zone below the previous stack frame
*/
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
- PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Setup frame pointer to point to the bpf stack area */
- if (bpf_is_seen_register(ctx, BPF_REG_FP))
- PPC_ADDI(b2p[BPF_REG_FP], 1,
- STACK_FRAME_MIN_SIZE + ctx->stack_size);
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+ STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -139,92 +179,101 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
- PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
- PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
if (ctx->seen & SEEN_FUNC) {
- PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
- PPC_MTLR(0);
+ EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
+ EMIT(PPC_RAW_MTLR(_R0));
}
}
}
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
bpf_jit_emit_common_epilogue(image, ctx);
/* Move result to r3 */
- PPC_MR(3, b2p[BPF_REG_0]);
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
- PPC_BLR();
+ EMIT(PPC_RAW_BLR());
}
-static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
- u64 func)
+static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
-#ifdef PPC64_ELF_ABI_v1
- /* func points to the function descriptor */
- PPC_LI64(b2p[TMP_REG_2], func);
- /* Load actual entry point from function descriptor */
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- PPC_MTLR(b2p[TMP_REG_1]);
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
-#else
- /* We can clobber r12 */
- PPC_FUNC_ADDR(12, func);
- PPC_MTLR(12);
-#endif
- PPC_BLRL();
+ unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
+ long reladdr;
+
+ if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+ reladdr = func_addr - CTX_NIA(ctx);
+
+ if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
+ pr_err("eBPF: address of %ps out of range of pcrel address.\n",
+ (void *)func);
+ return -ERANGE;
+ }
+ /* pla r12,addr */
+ EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
+ EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTR());
+
+ } else {
+ reladdr = func_addr - kernel_toc_addr();
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
+ return -ERANGE;
+ }
+
+ EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
+ EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+ }
+
+ return 0;
}
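
The two branches above enforce different reachability windows: the prefixed paddi takes a 34-bit signed pc-relative displacement, so the target must lie within 8 GiB of the call site, while the addis/addi pair is limited to a 32-bit signed offset from the kernel TOC. A small userspace sketch of the same checks (constants copied from the code above; illustrative only):

	#include <stdio.h>
	#include <stdbool.h>

	#define SZ_8G (8LL << 30)

	static bool pcrel_reachable(long long reladdr)	/* pla r12,addr */
	{
		return reladdr < SZ_8G && reladdr >= -SZ_8G;
	}

	static bool toc_reachable(long long reladdr)	/* addis/addi off r2 */
	{
		return reladdr <= 0x7FFFFFFFLL && reladdr >= -0x80000000LL;
	}

	int main(void)
	{
		long long r = 6LL << 30;	/* a target 6 GiB away */

		printf("pcrel: %d, toc: %d\n", pcrel_reachable(r), toc_reachable(r));
		return 0;
	}
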
-static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
- u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
+ if (WARN_ON_ONCE(func && is_module_text_address(func)))
+ return -EINVAL;
+
+ /* skip past descriptor if elf v1 */
+ func += FUNCTION_DESCR_SIZE;
+
/* Load function address into r12 */
- PPC_LI64(12, func);
+ PPC_LI64(_R12, func);
/* For bpf-to-bpf function calls, the callee's address is unknown
* until the last extra pass. As seen above, we use PPC_LI64() to
* load the callee's address, but this may optimize the number of
* instructions required based on the nature of the address.
*
- * Since we don't want the number of instructions emitted to change,
+ * Since we don't want the number of instructions emitted to increase,
* we pad the optimized PPC_LI64() call with NOPs to guarantee that
* we always have a five-instruction sequence, which is the maximum
* that PPC_LI64() can emit.
*/
- for (i = ctx->idx - ctx_idx; i < 5; i++)
- PPC_NOP();
+ if (!image)
+ for (i = ctx->idx - ctx_idx; i < 5; i++)
+ EMIT(PPC_RAW_NOP());
-#ifdef PPC64_ELF_ABI_v1
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, 12, 8);
- /* Load actual entry point from function descriptor */
- PPC_BPF_LL(12, 12, 0);
-#endif
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
- PPC_MTLR(12);
- PPC_BLRL();
+ return 0;
}
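
The NOP padding matters because PPC_LI64() is variable-length: small constants need one instruction, 32-bit ones about two, and a full 64-bit address up to five. The sizing pass (image == NULL) therefore reserves the five-slot worst case, so later patching of the real callee address cannot shift any offsets. A rough userspace model of the count (an approximation, not the kernel macro, which elides a few more sub-cases):

	#include <stdio.h>
	#include <stdint.h>

	static int li64_insns(int64_t v)
	{
		if (v >= -32768 && v < 32768)
			return 1;			/* li */
		if (v >= INT32_MIN && v <= INT32_MAX)
			return 2;			/* lis; ori */
		return 5;				/* lis; ori; sldi; oris; ori */
	}

	int main(void)
	{
		int64_t addrs[] = { 0x1234, 0x12345678, 0x123456789abcdef0LL };

		for (int i = 0; i < 3; i++)
			printf("%#llx: %d insn(s) + %d nop(s)\n",
			       (unsigned long long)addrs[i], li64_insns(addrs[i]),
			       5 - li64_insns(addrs[i]));
		return 0;
	}
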
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -232,67 +281,90 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* r4/BPF_REG_2 - pointer to bpf_array
* r5/BPF_REG_3 - index in bpf_array
*/
- int b2p_bpf_array = b2p[BPF_REG_2];
- int b2p_index = b2p[BPF_REG_3];
+ int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
+ int b2p_index = bpf_to_ppc(BPF_REG_3);
+ int bpf_tailcall_prologue_size = 8;
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
* if (index >= array->map.max_entries)
* goto out;
*/
- PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
- PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
- PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
- PPC_BCC(COND_GE, out);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
+ EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
+ PPC_BCC_SHORT(COND_GE, out);
/*
- * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
- PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
- PPC_BCC(COND_GT, out);
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+ EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+ PPC_BCC_SHORT(COND_GE, out);
/*
* tail_call_cnt++;
*/
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
- PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
/* prog = array->ptrs[index]; */
- PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
- PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
/*
* if (prog == NULL)
* goto out;
*/
- PPC_CMPLDI(b2p[TMP_REG_1], 0);
- PPC_BCC(COND_EQ, out);
+ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
-#ifdef PPC64_ELF_ABI_v1
- /* skip past the function descriptor */
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
- FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
-#else
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
-#endif
- PPC_MTCTR(b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
/* tear down stack, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
- PPC_BCTR();
+ EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
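
In C terms, the sequence emitted above implements the usual BPF tail-call contract before branching past the callee's prologue (and, on ELF ABI v1, its function descriptor). A stand-alone sketch with stand-in types; the MAX_TAIL_CALL_CNT value of 33 is an assumption consistent with the >= comparison introduced here:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_TAIL_CALL_CNT 33	/* assumption; see the >= check above */

	struct fake_prog { void (*bpf_func)(void); };
	struct fake_array { uint32_t max_entries; struct fake_prog *ptrs[4]; };

	static int tail_call(struct fake_array *arr, uint32_t index, uint64_t *tcc)
	{
		if (index >= arr->max_entries)		/* lwz; cmplw; bge out */
			return -1;
		if (*tcc >= MAX_TAIL_CALL_CNT)		/* ld; cmplwi; bge out */
			return -1;
		(*tcc)++;				/* addi; std */

		struct fake_prog *prog = arr->ptrs[index];	/* muli; add; ld */
		if (!prog)				/* cmpldi; beq out */
			return -1;

		/* really a bctr to bpf_func + prologue size, skipping the
		 * tail_call_cnt (re)initialization in the callee */
		prog->bpf_func();
		return 0;
	}

	static void callee(void) { puts("tail-called"); }

	int main(void)
	{
		struct fake_prog p = { callee };
		struct fake_array arr = { .max_entries = 1, .ptrs = { &p } };
		uint64_t tcc = 0;

		return tail_call(&arr, 0, &tcc);
	}
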
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+" .global bpf_stf_barrier ;"
+" bpf_stf_barrier: ;"
+" std 21,-64(1) ;"
+" std 22,-56(1) ;"
+" sync ;"
+" ld 21,-64(1) ;"
+" ld 22,-56(1) ;"
+" ori 31,31,0 ;"
+" .rept 14 ;"
+" b 1f ;"
+" 1: ;"
+" .endr ;"
+" blr ;"
+);
+
/* Assemble the body code between the prologue & epilogue */
-static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx,
- u32 *addrs, bool extra_pass)
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
+ u32 *addrs, int pass, bool extra_pass)
{
+ enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -302,8 +374,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
for (i = 0; i < flen; i++) {
u32 code = insn[i].code;
- u32 dst_reg = b2p[insn[i].dst_reg];
- u32 src_reg = b2p[insn[i].src_reg];
+ u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
+ u32 src_reg = bpf_to_ppc(insn[i].src_reg);
+ u32 size = BPF_SIZE(code);
+ u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+ u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+ u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
@@ -311,6 +387,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u32 true_cond;
u32 tmp_idx;
+ int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -330,9 +407,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* any issues.
*/
if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
- bpf_set_seen_register(ctx, insn[i].dst_reg);
+ bpf_set_seen_register(ctx, dst_reg);
if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
- bpf_set_seen_register(ctx, insn[i].src_reg);
+ bpf_set_seen_register(ctx, src_reg);
switch (code) {
/*
@@ -340,67 +417,70 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
*/
case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
- PPC_ADD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
- PPC_SUB(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else {
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
- if (BPF_OP(code) == BPF_SUB)
- imm = -imm;
- if (imm) {
- if (imm >= -32768 && imm < 32768)
- PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
- else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
- }
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm > -32768 && imm <= 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+ } else {
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
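
The asymmetric bounds in the SUB case above come from the emitted instruction being addi dst, dst, -imm with a 16-bit signed field: imm == -32768 cannot be negated into that field, while imm == 32768 can. A quick userspace check:

	#include <stdio.h>

	static int fits_si16(int v) { return v >= -32768 && v < 32768; }

	int main(void)
	{
		printf("-(-32768) fits: %d\n", fits_si16(-(-32768)));	/* 0 */
		printf("-(32768)  fits: %d\n", fits_si16(-(32768)));	/* 1 */
		return 0;
	}
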
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
if (BPF_CLASS(code) == BPF_ALU)
- PPC_MULW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
else
- PPC_MULD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
if (imm >= -32768 && imm < 32768)
- PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
else {
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_ALU)
- PPC_MULW(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
else
- PPC_MULD(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
- PPC_MULW(b2p[TMP_REG_1], src_reg,
- b2p[TMP_REG_1]);
- PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVWU(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
- PPC_MULD(b2p[TMP_REG_1], src_reg,
- b2p[TMP_REG_1]);
- PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVDU(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
@@ -408,42 +488,38 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
- else if (imm == 1)
- goto bpf_alu32_trunc;
+ if (imm == 1) {
+ if (BPF_OP(code) == BPF_DIV) {
+ goto bpf_alu32_trunc;
+ } else {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ }
+ }
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
switch (BPF_CLASS(code)) {
case BPF_ALU:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
- b2p[TMP_REG_1]);
- PPC_MULW(b2p[TMP_REG_1],
- b2p[TMP_REG_1],
- b2p[TMP_REG_2]);
- PPC_SUB(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVWU(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
break;
case BPF_ALU64:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
- b2p[TMP_REG_1]);
- PPC_MULD(b2p[TMP_REG_1],
- b2p[TMP_REG_1],
- b2p[TMP_REG_2]);
- PPC_SUB(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVDU(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
break;
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
- PPC_NEG(dst_reg, dst_reg);
+ EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
goto bpf_alu32_trunc;
/*
@@ -451,101 +527,101 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
*/
case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
- PPC_AND(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
if (!IMM_H(imm))
- PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
else {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
- PPC_OR(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
- PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
- PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
+ EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
- PPC_XOR(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
- PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
- PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
+ EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
/* slw clears top 32 bits */
- PPC_SLW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
/* skip zero extension move, but set address map. */
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
- PPC_SLD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
/* with imm 0, we still need to clear top 32 bits */
- PPC_SLWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
if (imm != 0)
- PPC_SLDI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
- PPC_SRW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
- PPC_SRD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
- PPC_SRWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
if (imm != 0)
- PPC_SRDI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
- PPC_SRAW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
- PPC_SRAD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
- PPC_SRAWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm != 0)
- PPC_SRADI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
break;
/*
@@ -555,10 +631,10 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
if (imm == 1) {
/* special mov32 for zext */
- PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
}
- PPC_MR(dst_reg, src_reg);
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
@@ -572,7 +648,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
bpf_alu32_trunc:
/* Truncate to 32-bits */
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
- PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
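
The rlwinm above (rotate 0, mask 0-31) relies on IBM bit numbering, where bit 0 is the most significant bit of the 32-bit word; the same convention drives the rotate-and-mask byte swaps below. A userspace model of the rotate-left-and-mask semantics (a sketch valid for mb <= me; not the kernel's emitter):

	#include <stdio.h>
	#include <stdint.h>

	/* rotate rs left by sh, then keep IBM bits mb..me (bit 0 = MSB) */
	static uint32_t rlwinm(uint32_t rs, int sh, int mb, int me)
	{
		uint32_t rot = sh ? (rs << sh) | (rs >> (32 - sh)) : rs;
		uint32_t mask = (0xffffffffu >> mb) & (0xffffffffu << (31 - me));

		return rot & mask;
	}

	int main(void)
	{
		/* mask 0..31 keeps the whole low word: the 32-bit truncation */
		printf("%#010x\n", rlwinm(0xdeadbeef, 0, 0, 31));
		/* rotate 8 left, mask 16..23 (0x0000ff00): first half of bswap16 */
		printf("%#010x\n", rlwinm(0x0000beef, 8, 16, 23));
		return 0;
	}
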
/*
@@ -590,11 +666,11 @@ bpf_alu32_trunc:
switch (imm) {
case 16:
/* Rotate 8 bits left & mask with 0x0000ff00 */
- PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
+ EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
/* Rotate 8 bits right & insert LSB to reg */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
/* Move result back to dst_reg */
- PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
case 32:
/*
@@ -602,25 +678,29 @@ bpf_alu32_trunc:
* 2 bytes are already in their final position
* -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
*/
- PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
+ EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
- PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
case 64:
- /*
- * Way easier and faster(?) to store the value
- * into stack and then use ldbrx
- *
- * ctx->seen will be reliable in pass2, but
- * the instructions generated will remain the
- * same across all passes
- */
- PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
- PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
- PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
+ /* Store the value to stack and then use byte-reverse loads */
+ EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
+ EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
+ } else {
+ EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
+ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
+ EMIT(PPC_RAW_LI(tmp2_reg, 4));
+ EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
+ }
break;
}
break;
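
The pre-ldbrx fallback works because a 64-bit byte swap is exactly two 32-bit byte swaps with the halves exchanged, which is what the two lwbrx loads plus shift and or compute. A quick userspace check of that identity (GCC/Clang builtins; illustrative only):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t v = 0x0102030405060708ull;
		uint64_t composed =
			((uint64_t)__builtin_bswap32((uint32_t)v) << 32) |
			__builtin_bswap32((uint32_t)(v >> 32));

		assert(composed == __builtin_bswap64(v));
		printf("bswap64(%#llx) = %#llx\n",
		       (unsigned long long)v, (unsigned long long)composed);
		return 0;
	}
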
@@ -629,14 +709,14 @@ emit_clear:
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
- PPC_RLDICL(dst_reg, dst_reg, 0, 48);
+ EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case 32:
if (!fp->aux->verifier_zext)
/* zero-extend 32 bits into 64 bits */
- PPC_RLDICL(dst_reg, dst_reg, 0, 32);
+ EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
break;
case 64:
/* nop */
@@ -645,66 +725,155 @@ emit_clear:
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+ !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+ break;
+
+ switch (stf_barrier) {
+ case STF_BARRIER_EIEIO:
+ EMIT(PPC_RAW_EIEIO() | 0x02000000);
+ break;
+ case STF_BARRIER_SYNC_ORI:
+ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+ break;
+ case STF_BARRIER_FALLBACK:
+ ctx->seen |= SEEN_FUNC;
+ PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+ break;
+ case STF_BARRIER_NONE:
+ break;
+ }
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ EMIT(PPC_RAW_LI(tmp1_reg, imm));
+ src_reg = tmp1_reg;
}
- PPC_STB(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ EMIT(PPC_RAW_LI(tmp1_reg, imm));
+ src_reg = tmp1_reg;
}
- PPC_STH(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI32(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ PPC_LI32(tmp1_reg, imm);
+ src_reg = tmp1_reg;
}
- PPC_STW(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI32(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ PPC_LI32(tmp1_reg, imm);
+ src_reg = tmp1_reg;
+ }
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp2_reg, off));
+ EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
+ } else {
+ EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
}
- PPC_BPF_STL(src_reg, dst_reg, off);
break;
/*
- * BPF_STX XADD (atomic_add)
+ * BPF_STX ATOMIC (atomic ops)
*/
- /* *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* Get EA into TMP_REG_1 */
- PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ save_reg = tmp2_reg;
+ ret_reg = src_reg;
+
+ /* Get offset into TMP_REG_1 */
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- /* add value from src_reg into this */
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- /* store result back */
- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+ else
+ EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+
+ /* Save old value in _R0 */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(_R0, tmp2_reg));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_R0 */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ else
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ /* Don't set if different from old value */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
+ default:
+ pr_err_ratelimited(
+ "eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
+ /* store new value */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
+ else
+ EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
- break;
- /* *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
- PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- tmp_idx = ctx->idx * 4;
- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(ret_reg, _R0));
+ /*
+ * Skip unnecessary zero-extension for 32-bit cmpxchg.
+ * For context, see commit 39491867ace5.
+ */
+ if (size != BPF_DW && imm == BPF_CMPXCHG &&
+ insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+ }
break;
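
The larx/stcx. loop above retries from tmp_idx until the store-conditional succeeds, which is the standard ll/sc rendering of these operations. Their observable semantics match GCC's __atomic builtins, as this userspace sketch shows (note that BPF_CMPXCHG compares against and returns the old value in R0, while the other BPF_FETCH variants return it in src_reg):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mem = 5, src = 3, r0 = 5;

		/* BPF_ADD | BPF_FETCH: src_reg receives the old value */
		uint64_t old = __atomic_fetch_add(&mem, src, __ATOMIC_RELAXED);
		printf("fetch_add: old=%llu mem=%llu\n",
		       (unsigned long long)old, (unsigned long long)mem);

		/* BPF_CMPXCHG: store src only if mem == r0; r0 always
		 * ends up holding the old memory value */
		__atomic_compare_exchange_n(&mem, &r0, src, 0,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
		printf("cmpxchg: r0=%llu mem=%llu\n",
		       (unsigned long long)r0, (unsigned long long)mem);
		return 0;
	}
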
/*
@@ -712,25 +881,70 @@ emit_clear:
*/
/* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
- PPC_LBZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_H:
- PPC_LHZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
- PPC_LWZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
- PPC_BPF_LL(dst_reg, src_reg, off);
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /*
+ * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
+ * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
+ * load only if addr is kernel address (see is_kernel_addr()), otherwise
+ * set dst_reg=0 and move on.
+ */
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
+ PPC_LI64(tmp2_reg, 0x8000000000000000ul);
+ else /* BOOK3S_64 */
+ PPC_LI64(tmp2_reg, PAGE_OFFSET);
+ EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
+ PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ /*
+ * Check if 'off' is word aligned for BPF_DW, because
+ * we might generate two instructions.
+ */
+ if (BPF_SIZE(code) == BPF_DW && (off & 3))
+ PPC_JMP((ctx->idx + 3) * 4);
+ else
+ PPC_JMP((ctx->idx + 2) * 4);
+ }
+
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
+ EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
+ } else {
+ EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
+ }
+ break;
+ }
+
+ if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+ ctx->idx - 1, 4, dst_reg);
+ if (ret)
+ return ret;
+ }
break;
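
The guard emitted above means a BPF_PROBE_MEM load never faults on a non-kernel pointer: anything at or below the kernel boundary yields zero, and only addresses above it reach the real load, which is then covered by an extable entry. A userspace model with a stand-in boundary constant (an assumption, not the real PAGE_OFFSET handling):

	#include <stdint.h>
	#include <stdio.h>

	#define FAKE_PAGE_OFFSET 0xc000000000000000ull	/* stand-in boundary */

	static uint64_t probe_load(uint64_t addr, const uint64_t *backing)
	{
		if (addr <= FAKE_PAGE_OFFSET)	/* cmpld; bgt over the next two */
			return 0;		/* li dst, 0; b past the load */
		return *backing;		/* the real ld; extable on fault */
	}

	int main(void)
	{
		uint64_t v = 42;

		printf("user addr:   %llu\n",
		       (unsigned long long)probe_load(0x1000, &v));
		printf("kernel addr: %llu\n",
		       (unsigned long long)probe_load(FAKE_PAGE_OFFSET + 0x1000, &v));
		return 0;
	}
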
/*
@@ -740,9 +954,14 @@ emit_clear:
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32);
+ tmp_idx = ctx->idx;
+ PPC_LI64(dst_reg, imm64);
+ /* padding to allow full 5 instructions for later patching */
+ if (!image)
+ for (j = ctx->idx - tmp_idx; j < 5; j++)
+ EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
- PPC_LI64(dst_reg, imm64);
break;
/*
@@ -754,8 +973,11 @@ emit_clear:
* the epilogue. If we _are_ the last instruction,
* we'll just fall through to the epilogue.
*/
- if (i != flen - 1)
- PPC_JMP(exit_addr);
+ if (i != flen - 1) {
+ ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
+ if (ret)
+ return ret;
+ }
/* else fall through to the epilogue */
break;
@@ -771,11 +993,15 @@ emit_clear:
return ret;
if (func_addr_fixed)
- bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
+
+ if (ret)
+ return ret;
+
/* move return value from r3 to BPF_REG_0 */
- PPC_MR(b2p[BPF_REG_0], 3);
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
break;
/*
@@ -860,9 +1086,9 @@ cond_branch:
case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
if (BPF_CLASS(code) == BPF_JMP32)
- PPC_CMPLW(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
else
- PPC_CMPLD(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
@@ -874,21 +1100,17 @@ cond_branch:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
if (BPF_CLASS(code) == BPF_JMP32)
- PPC_CMPW(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
else
- PPC_CMPD(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
if (BPF_CLASS(code) == BPF_JMP) {
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
- src_reg);
+ EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
} else {
- int tmp_reg = b2p[TMP_REG_1];
-
- PPC_AND(tmp_reg, dst_reg, src_reg);
- PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
- 31);
+ EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
}
break;
case BPF_JMP | BPF_JNE | BPF_K:
@@ -912,19 +1134,17 @@ cond_branch:
*/
if (imm >= 0 && imm < 32768) {
if (is_jmp32)
- PPC_CMPLWI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
else
- PPC_CMPLDI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
} else {
/* sign-extending load */
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
/* ... but unsigned comparison */
if (is_jmp32)
- PPC_CMPLW(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
else
- PPC_CMPLD(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
}
break;
}
@@ -945,17 +1165,15 @@ cond_branch:
*/
if (imm >= -32768 && imm < 32768) {
if (is_jmp32)
- PPC_CMPWI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPWI(dst_reg, imm));
else
- PPC_CMPDI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPDI(dst_reg, imm));
} else {
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
if (is_jmp32)
- PPC_CMPW(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
else
- PPC_CMPD(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
}
break;
}
@@ -964,19 +1182,16 @@ cond_branch:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768)
/* PPC_ANDI is _only/always_ dot-form */
- PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
+ EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
else {
- int tmp_reg = b2p[TMP_REG_1];
-
- PPC_LI32(tmp_reg, imm);
+ PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_JMP) {
- PPC_AND_DOT(tmp_reg, dst_reg,
- tmp_reg);
+ EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
+ tmp1_reg));
} else {
- PPC_AND(tmp_reg, dst_reg,
- tmp_reg);
- PPC_RLWINM_DOT(tmp_reg, tmp_reg,
- 0, 0, 31);
+ EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
+ 0, 0, 31));
}
}
break;
@@ -989,7 +1204,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
@@ -1009,249 +1226,3 @@ cond_branch:
return 0;
}
-
-/* Fix the branch target addresses for subprog calls */
-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx, u32 *addrs)
-{
- const struct bpf_insn *insn = fp->insnsi;
- bool func_addr_fixed;
- u64 func_addr;
- u32 tmp_idx;
- int i, ret;
-
- for (i = 0; i < fp->len; i++) {
- /*
- * During the extra pass, only the branch target addresses for
- * the subprog calls need to be fixed. All other instructions
- * can left untouched.
- *
- * The JITed image length does not change because we already
- * ensure that the JITed instruction sequence for these calls
- * are of fixed length by padding them with NOPs.
- */
- if (insn[i].code == (BPF_JMP | BPF_CALL) &&
- insn[i].src_reg == BPF_PSEUDO_CALL) {
- ret = bpf_jit_get_func_addr(fp, &insn[i], true,
- &func_addr,
- &func_addr_fixed);
- if (ret < 0)
- return ret;
-
- /*
- * Save ctx->idx as this would currently point to the
- * end of the JITed image and set it to the offset of
- * the instruction sequence corresponding to the
- * subprog call temporarily.
- */
- tmp_idx = ctx->idx;
- ctx->idx = addrs[i] / 4;
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
-
- /*
- * Restore ctx->idx here. This is safe as the length
- * of the JITed sequence remains unchanged.
- */
- ctx->idx = tmp_idx;
- }
- }
-
- return 0;
-}
-
-struct powerpc64_jit_data {
- struct bpf_binary_header *header;
- u32 *addrs;
- u8 *image;
- u32 proglen;
- struct codegen_context ctx;
-};
-
-bool bpf_jit_needs_zext(void)
-{
- return true;
-}
-
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
-{
- u32 proglen;
- u32 alloclen;
- u8 *image = NULL;
- u32 *code_base;
- u32 *addrs;
- struct powerpc64_jit_data *jit_data;
- struct codegen_context cgctx;
- int pass;
- int flen;
- struct bpf_binary_header *bpf_hdr;
- struct bpf_prog *org_fp = fp;
- struct bpf_prog *tmp_fp;
- bool bpf_blinded = false;
- bool extra_pass = false;
-
- if (!fp->jit_requested)
- return org_fp;
-
- tmp_fp = bpf_jit_blind_constants(org_fp);
- if (IS_ERR(tmp_fp))
- return org_fp;
-
- if (tmp_fp != org_fp) {
- bpf_blinded = true;
- fp = tmp_fp;
- }
-
- jit_data = fp->aux->jit_data;
- if (!jit_data) {
- jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
- if (!jit_data) {
- fp = org_fp;
- goto out;
- }
- fp->aux->jit_data = jit_data;
- }
-
- flen = fp->len;
- addrs = jit_data->addrs;
- if (addrs) {
- cgctx = jit_data->ctx;
- image = jit_data->image;
- bpf_hdr = jit_data->header;
- proglen = jit_data->proglen;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
- extra_pass = true;
- goto skip_init_ctx;
- }
-
- addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL) {
- fp = org_fp;
- goto out_addrs;
- }
-
- memset(&cgctx, 0, sizeof(struct codegen_context));
-
- /* Make sure that the stack is quadword aligned. */
- cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
-
- /* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- /* We hit something illegal or unsupported. */
- fp = org_fp;
- goto out_addrs;
- }
-
- /*
- * If we have seen a tail call, we need a second pass.
- * This is because bpf_jit_emit_common_epilogue() is called
- * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
- */
- if (cgctx.seen & SEEN_TAILCALL) {
- cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- fp = org_fp;
- goto out_addrs;
- }
- }
-
- /*
- * Pretend to build prologue, given the features we've seen. This will
- * update ctgtx.idx as it pretends to output instructions, then we can
- * calculate total size from idx.
- */
- bpf_jit_build_prologue(0, &cgctx);
- bpf_jit_build_epilogue(0, &cgctx);
-
- proglen = cgctx.idx * 4;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
-
- bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
- bpf_jit_fill_ill_insns);
- if (!bpf_hdr) {
- fp = org_fp;
- goto out_addrs;
- }
-
-skip_init_ctx:
- code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
-
- if (extra_pass) {
- /*
- * Do not touch the prologue and epilogue as they will remain
- * unchanged. Only fix the branch target address for subprog
- * calls in the body.
- *
- * This does not change the offsets and lengths of the subprog
- * call instruction sequences and hence, the size of the JITed
- * image as well.
- */
- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
-
- /* There is no need to perform the usual passes. */
- goto skip_codegen_passes;
- }
-
- /* Code generation passes 1-2 */
- for (pass = 1; pass < 3; pass++) {
- /* Now build the prologue, body code & epilogue for real. */
- cgctx.idx = 0;
- bpf_jit_build_prologue(code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
- bpf_jit_build_epilogue(code_base, &cgctx);
-
- if (bpf_jit_enable > 1)
- pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
- proglen - (cgctx.idx * 4), cgctx.seen);
- }
-
-skip_codegen_passes:
- if (bpf_jit_enable > 1)
- /*
- * Note that we output the base address of the code_base
- * rather than image, since opcodes are in code_base.
- */
- bpf_jit_dump(flen, proglen, pass, code_base);
-
-#ifdef PPC64_ELF_ABI_v1
- /* Function descriptor nastiness: Address + TOC */
- ((u64 *)image)[0] = (u64)code_base;
- ((u64 *)image)[1] = local_paca->kernel_toc;
-#endif
-
- fp->bpf_func = (void *)image;
- fp->jited = 1;
- fp->jited_len = alloclen;
-
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
- if (!fp->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(fp, addrs);
-out_addrs:
- kfree(addrs);
- kfree(jit_data);
- fp->aux->jit_data = NULL;
- } else {
- jit_data->addrs = addrs;
- jit_data->ctx = cgctx;
- jit_data->proglen = proglen;
- jit_data->image = image;
- jit_data->header = bpf_hdr;
- }
-
-out:
- if (bpf_blinded)
- bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
-
- return fp;
-}
-
-/* Overriding bpf_jit_free() as we don't set images read-only. */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *bpf_hdr = (void *)addr;
-
- if (fp->jited)
- bpf_jit_binary_free(bpf_hdr);
-
- bpf_prog_unlock_free(fp);
-}