Diffstat (limited to 'arch/powerpc/net')
-rw-r--r--  arch/powerpc/net/Makefile         |    3
-rw-r--r--  arch/powerpc/net/bpf_jit.h        |  329
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S     |  222
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c   | 1767
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 1388
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 1630
6 files changed, 4322 insertions(+), 1017 deletions(-)
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile index 266b3950c3ac..8e60af32e51e 100644 --- a/arch/powerpc/net/Makefile +++ b/arch/powerpc/net/Makefile @@ -1,4 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 # # Arch-specific network modules # -obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_jit_comp$(BITS).o diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 8a5dfaf5c6b7..8334cd667bba 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -1,195 +1,117 @@ -/* bpf_jit.h: BPF JIT compiler for PPC64 +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * bpf_jit.h: BPF JIT compiler for PPC * * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. + * 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> */ #ifndef _BPF_JIT_H #define _BPF_JIT_H -#define BPF_PPC_STACK_LOCALS 32 -#define BPF_PPC_STACK_BASIC (48+64) -#define BPF_PPC_STACK_SAVE (18*8) -#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ - BPF_PPC_STACK_SAVE) -#define BPF_PPC_SLOWPATH_FRAME (48+64) - -/* - * Generated code register usage: - * - * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: - * - * skb r3 (Entry parameter) - * A register r4 - * X register r5 - * addr param r6 - * r7-r10 scratch - * skb->data r14 - * skb headlen r15 (skb->len - skb->data_len) - * m[0] r16 - * m[...] ... - * m[15] r31 - */ -#define r_skb 3 -#define r_ret 3 -#define r_A 4 -#define r_X 5 -#define r_addr 6 -#define r_scratch1 7 -#define r_D 14 -#define r_HL 15 -#define r_M 16 - -#ifndef __ASSEMBLY__ - -/* - * Assembly helpers from arch/powerpc/net/bpf_jit.S: - */ -#define DECLARE_LOAD_FUNC(func) \ - extern u8 func[], func##_negative_offset[], func##_positive_offset[] +#ifndef __ASSEMBLER__ -DECLARE_LOAD_FUNC(sk_load_word); -DECLARE_LOAD_FUNC(sk_load_half); -DECLARE_LOAD_FUNC(sk_load_byte); -DECLARE_LOAD_FUNC(sk_load_byte_msh); +#include <asm/types.h> +#include <asm/ppc-opcode.h> +#include <linux/build_bug.h> +#ifdef CONFIG_PPC64_ELF_ABI_V1 #define FUNCTION_DESCR_SIZE 24 +#else +#define FUNCTION_DESCR_SIZE 0 +#endif -/* - * 16-bit immediate helper macros: HA() is for use with sign-extending instrs - * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the - * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000). 
- */ -#define IMM_H(i) ((uintptr_t)(i)>>16) -#define IMM_HA(i) (((uintptr_t)(i)>>16) + \ - (((uintptr_t)(i) & 0x8000) >> 15)) -#define IMM_L(i) ((uintptr_t)(i) & 0xffff) +#define CTX_NIA(ctx) ((unsigned long)ctx->idx * 4) + +#define SZL sizeof(unsigned long) +#define BPF_INSN_SAFETY 64 #define PLANT_INSTR(d, idx, instr) \ do { if (d) { (d)[idx] = instr; } idx++; } while (0) #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) -#define PPC_NOP() EMIT(PPC_INST_NOP) -#define PPC_BLR() EMIT(PPC_INST_BLR) -#define PPC_BLRL() EMIT(PPC_INST_BLRL) -#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | ___PPC_RT(r)) -#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | ___PPC_RT(d) | \ - ___PPC_RA(a) | IMM_L(i)) -#define PPC_MR(d, a) PPC_OR(d, a, a) -#define PPC_LI(r, i) PPC_ADDI(r, 0, i) -#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \ - ___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i)) -#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) -#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ - ___PPC_RA(base) | ((i) & 0xfffc)) - -#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -/* Convenience helpers for the above with 'far' offsets: */ -#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LD(r, r, IMM_L(i)); } } while(0) - -#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LWZ(r, r, IMM_L(i)); } } while(0) - -#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LHZ(r, r, IMM_L(i)); } } while(0) - -#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b)) - -#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \ - ___PPC_RB(a) | ___PPC_RA(b)) -#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | ___PPC_RT(d) | \ - ___PPC_RA(a) | IMM_L(i)) -#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_XOR(d, a, b) EMIT(PPC_INST_XOR | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_XORI(d, a, i) EMIT(PPC_INST_XORI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \ - ___PPC_RS(a) | 
IMM_L(i)) -#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -/* slwi = rlwinm Rx, Ry, n, 0, 31-n */ -#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(i) | \ - __PPC_MB(0) | __PPC_ME(31-(i))) -/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */ -#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(32-(i)) | \ - __PPC_MB(i) | __PPC_ME(31)) -/* sldi = rldicr Rx, Ry, n, 63-n */ -#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(i) | \ - __PPC_MB(63-(i)) | (((i) & 0x20) >> 4)) -#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a)) - /* Long jump; (unconditional 'branch') */ -#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ - (((dest) - (ctx->idx * 4)) & 0x03fffffc)) +#define PPC_JMP(dest) \ + do { \ + long offset = (long)(dest) - CTX_NIA(ctx); \ + if ((dest) != 0 && !is_offset_in_branch_range(offset)) { \ + pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \ + return -ERANGE; \ + } \ + EMIT(PPC_RAW_BRANCH(offset)); \ + } while (0) + /* "cond" here covers BO:BI fields. */ -#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \ - (((cond) & 0x3ff) << 16) | \ - (((dest) - (ctx->idx * 4)) & \ - 0xfffc)) -#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \ - if ((u32)(uintptr_t)(i) >= 32768) { \ - PPC_ADDIS(d, d, IMM_HA(i)); \ - } } while(0) +#define PPC_BCC_SHORT(cond, dest) \ + do { \ + long offset = (long)(dest) - CTX_NIA(ctx); \ + if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) { \ + pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \ + return -ERANGE; \ + } \ + EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \ + } while (0) + +/* + * Sign-extended 32-bit immediate load + * + * If this is a dummy pass (!image), account for + * maximum possible instructions. 
+ */ +#define PPC_LI32(d, i) do { \ + if (!image) \ + ctx->idx += 2; \ + else { \ + if ((int)(uintptr_t)(i) >= -32768 && \ + (int)(uintptr_t)(i) < 32768) \ + EMIT(PPC_RAW_LI(d, i)); \ + else { \ + EMIT(PPC_RAW_LIS(d, IMM_H(i))); \ + if (IMM_L(i)) \ + EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \ + } \ + } } while (0) + +#ifdef CONFIG_PPC64 +/* If dummy pass (!image), account for maximum possible instructions */ #define PPC_LI64(d, i) do { \ - if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \ + if (!image) \ + ctx->idx += 5; \ + else { \ + if ((long)(i) >= -2147483648 && \ + (long)(i) < 2147483648) \ PPC_LI32(d, i); \ else { \ - PPC_LIS(d, ((uintptr_t)(i) >> 48)); \ - if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \ - PPC_ORI(d, d, \ - ((uintptr_t)(i) >> 32) & 0xffff); \ - PPC_SLDI(d, d, 32); \ + if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \ + EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) & \ + 0xffff)); \ + else { \ + EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \ + if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \ + EMIT(PPC_RAW_ORI(d, d, \ + ((uintptr_t)(i) >> 32) & 0xffff)); \ + } \ + EMIT(PPC_RAW_SLDI(d, d, 32)); \ if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \ - PPC_ORIS(d, d, \ - ((uintptr_t)(i) >> 16) & 0xffff); \ + EMIT(PPC_RAW_ORIS(d, d, \ + ((uintptr_t)(i) >> 16) & 0xffff)); \ if ((uintptr_t)(i) & 0x000000000000ffffULL) \ - PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ - } } while (0); - -static inline bool is_nearbranch(int offset) -{ - return (offset < 32768) && (offset >= -32768); -} + EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \ + 0xffff)); \ + } \ + } } while (0) +#define PPC_LI_ADDR PPC_LI64 + +#ifndef CONFIG_PPC_KERNEL_PCREL +#define PPC64_LOAD_PACA() \ + EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))) +#else +#define PPC64_LOAD_PACA() do {} while (0) +#endif +#else +#define PPC_LI64(d, i) BUILD_BUG() +#define PPC_LI_ADDR PPC_LI32 +#define PPC64_LOAD_PACA() BUILD_BUG() +#endif /* * The fly in the ointment of code size changing from pass to pass is @@ -199,12 +121,12 @@ static inline bool is_nearbranch(int offset) * state. */ #define PPC_BCC(cond, dest) do { \ - if (is_nearbranch((dest) - (ctx->idx * 4))) { \ + if (is_offset_in_cond_branch_range((long)(dest) - CTX_NIA(ctx))) { \ PPC_BCC_SHORT(cond, dest); \ - PPC_NOP(); \ + EMIT(PPC_RAW_NOP()); \ } else { \ /* Flip the 'T or F' bit to invert comparison */ \ - PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \ + PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, CTX_NIA(ctx) + 2*4); \ PPC_JMP(dest); \ } } while(0) @@ -221,19 +143,68 @@ static inline bool is_nearbranch(int offset) #define COND_EQ (CR0_EQ | COND_CMP_TRUE) #define COND_NE (CR0_EQ | COND_CMP_FALSE) #define COND_LT (CR0_LT | COND_CMP_TRUE) +#define COND_LE (CR0_GT | COND_CMP_FALSE) -#define SEEN_DATAREF 0x10000 /* might call external helpers */ -#define SEEN_XREG 0x20000 /* X reg is used */ -#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary - * storage */ -#define SEEN_MEM_MSK 0x0ffff +#define SEEN_FUNC 0x20000000 /* might call external helpers */ +#define SEEN_TAILCALL 0x40000000 /* uses tail calls */ struct codegen_context { + /* + * This is used to track register usage as well + * as calls to external helpers. 
+ * - register usage is tracked with corresponding + * bits (r3-r31) + * - rest of the bits can be used to track other + * things -- for now, we use bits 0 to 2 + * encoded in SEEN_* macros above + */ unsigned int seen; unsigned int idx; - int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ + unsigned int stack_size; + int b2p[MAX_BPF_JIT_REG + 3]; + unsigned int exentry_idx; + unsigned int alt_exit_addr; + u64 arena_vm_start; + u64 user_vm_start; }; +#define bpf_to_ppc(r) (ctx->b2p[r]) + +#ifdef CONFIG_PPC32 +#define BPF_FIXUP_LEN 3 /* Three instructions => 12 bytes */ +#else +#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */ +#endif + +static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i) +{ + return ctx->seen & (1 << (31 - i)); +} + +static inline void bpf_set_seen_register(struct codegen_context *ctx, int i) +{ + ctx->seen |= 1 << (31 - i); +} + +static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i) +{ + ctx->seen &= ~(1 << (31 - i)); +} + +void bpf_jit_init_reg_mapping(struct codegen_context *ctx); +int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func); +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass); +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx); +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx); +void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx); +void bpf_jit_realloc_regs(struct codegen_context *ctx); +int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr); + +int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass, + struct codegen_context *ctx, int insn_idx, + int jmp_off, int dst_reg, u32 code); + #endif #endif diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S deleted file mode 100644 index 7d3a3b5619a2..000000000000 --- a/arch/powerpc/net/bpf_jit_64.S +++ /dev/null @@ -1,222 +0,0 @@ -/* bpf_jit.S: Packet/header access helper functions - * for PPC64 BPF compiler. - * - * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - */ - -#include <asm/ppc_asm.h> -#include "bpf_jit.h" - -/* - * All of these routines are called directly from generated code, - * whose register usage is: - * - * r3 skb - * r4,r5 A,X - * r6 *** address parameter to helper *** - * r7-r10 scratch - * r14 skb->data - * r15 skb headlen - * r16-31 M[] - */ - -/* - * To consider: These helpers are so small it could be better to just - * generate them inline. Inline code can do the simple headlen check - * then branch directly to slow_path_XXX if required. (In fact, could - * load a spare GPR with the address of slow_path_generic and pass size - * as an argument, making the call site a mtlr, li and bllr.) - */ - .globl sk_load_word -sk_load_word: - cmpdi r_addr, 0 - blt bpf_slow_path_word_neg - .globl sk_load_word_positive_offset -sk_load_word_positive_offset: - /* Are we accessing past headlen? */ - subi r_scratch1, r_HL, 4 - cmpd r_scratch1, r_addr - blt bpf_slow_path_word - /* Nope, just hitting the header. cr0 here is eq or gt! */ - lwzx r_A, r_D, r_addr - /* When big endian we don't need to byteswap. 
*/ - blr /* Return success, cr0 != LT */ - - .globl sk_load_half -sk_load_half: - cmpdi r_addr, 0 - blt bpf_slow_path_half_neg - .globl sk_load_half_positive_offset -sk_load_half_positive_offset: - subi r_scratch1, r_HL, 2 - cmpd r_scratch1, r_addr - blt bpf_slow_path_half - lhzx r_A, r_D, r_addr - blr - - .globl sk_load_byte -sk_load_byte: - cmpdi r_addr, 0 - blt bpf_slow_path_byte_neg - .globl sk_load_byte_positive_offset -sk_load_byte_positive_offset: - cmpd r_HL, r_addr - ble bpf_slow_path_byte - lbzx r_A, r_D, r_addr - blr - -/* - * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) - * r_addr is the offset value - */ - .globl sk_load_byte_msh -sk_load_byte_msh: - cmpdi r_addr, 0 - blt bpf_slow_path_byte_msh_neg - .globl sk_load_byte_msh_positive_offset -sk_load_byte_msh_positive_offset: - cmpd r_HL, r_addr - ble bpf_slow_path_byte_msh - lbzx r_X, r_D, r_addr - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -/* Call out to skb_copy_bits: - * We'll need to back up our volatile regs first; we have - * local variable space at r1+(BPF_PPC_STACK_BASIC). - * Allocate a new stack frame here to remain ABI-compliant in - * stashing LR. - */ -#define bpf_slow_path_common(SIZE) \ - mflr r0; \ - std r0, 16(r1); \ - /* R3 goes in parameter space of caller's frame */ \ - std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \ - stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ - /* R3 = r_skb, as passed */ \ - mr r4, r_addr; \ - li r6, SIZE; \ - bl skb_copy_bits; \ - nop; \ - /* R3 = 0 on success */ \ - addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - ld r0, 16(r1); \ - ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - mtlr r0; \ - cmpdi r3, 0; \ - blt bpf_error; /* cr0 = LT */ \ - ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - /* Great success! */ - -bpf_slow_path_word: - bpf_slow_path_common(4) - /* Data value is on stack, and cr0 != LT */ - lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) - blr - -bpf_slow_path_half: - bpf_slow_path_common(2) - lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) - blr - -bpf_slow_path_byte: - bpf_slow_path_common(1) - lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) - blr - -bpf_slow_path_byte_msh: - bpf_slow_path_common(1) - lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -/* Call out to bpf_internal_load_pointer_neg_helper: - * We'll need to back up our volatile regs first; we have - * local variable space at r1+(BPF_PPC_STACK_BASIC). - * Allocate a new stack frame here to remain ABI-compliant in - * stashing LR. - */ -#define sk_negative_common(SIZE) \ - mflr r0; \ - std r0, 16(r1); \ - /* R3 goes in parameter space of caller's frame */ \ - std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ - /* R3 = r_skb, as passed */ \ - mr r4, r_addr; \ - li r5, SIZE; \ - bl bpf_internal_load_pointer_neg_helper; \ - nop; \ - /* R3 != 0 on success */ \ - addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ - ld r0, 16(r1); \ - ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ - ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ - mtlr r0; \ - cmpldi r3, 0; \ - beq bpf_error_slow; /* cr0 = EQ */ \ - mr r_addr, r3; \ - ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ - /* Great success! 
*/ - -bpf_slow_path_word_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_word_negative_offset -sk_load_word_negative_offset: - sk_negative_common(4) - lwz r_A, 0(r_addr) - blr - -bpf_slow_path_half_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_half_negative_offset -sk_load_half_negative_offset: - sk_negative_common(2) - lhz r_A, 0(r_addr) - blr - -bpf_slow_path_byte_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_byte_negative_offset -sk_load_byte_negative_offset: - sk_negative_common(1) - lbz r_A, 0(r_addr) - blr - -bpf_slow_path_byte_msh_neg: - lis r_scratch1,-32 /* SKF_LL_OFF */ - cmpd r_addr, r_scratch1 /* addr < SKF_* */ - blt bpf_error /* cr0 = LT */ - .globl sk_load_byte_msh_negative_offset -sk_load_byte_msh_negative_offset: - sk_negative_common(1) - lbz r_X, 0(r_addr) - rlwinm r_X, r_X, 2, 32-4-2, 31-2 - blr - -bpf_error_slow: - /* fabricate a cr0 = lt */ - li r_scratch1, -1 - cmpdi r_scratch1, 0 -bpf_error: - /* Entered with cr0 = lt */ - li r3, 0 - /* Generated code will 'blt epilogue', returning 0. */ - blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index bf56e33f8257..5e976730b2f5 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -1,694 +1,1231 @@ -/* bpf_jit_comp.c: BPF JIT compiler for PPC64 +// SPDX-License-Identifier: GPL-2.0-only +/* + * eBPF JIT compiler * - * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> + * IBM Corporation * - * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. + * Based on the powerpc classic BPF JIT compiler by Matt Evans */ #include <linux/moduleloader.h> #include <asm/cacheflush.h> +#include <asm/asm-compat.h> #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/memory.h> +#include <linux/bpf.h> + +#include <asm/kprobes.h> +#include <asm/text-patching.h> #include "bpf_jit.h" -#ifndef __BIG_ENDIAN -/* There are endianness assumptions herein. 
*/ -#error "Little-endian PPC not supported in BPF compiler" +/* These offsets are from bpf prog end and stay the same across progs */ +static int bpf_jit_ool_stub, bpf_jit_long_branch_stub; + +static void bpf_jit_fill_ill_insns(void *area, unsigned int size) +{ + memset32(area, BREAKPOINT_INSTRUCTION, size / 4); +} + +void dummy_tramp(void); + +asm ( +" .pushsection .text, \"ax\", @progbits ;" +" .global dummy_tramp ;" +" .type dummy_tramp, @function ;" +"dummy_tramp: ;" +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +" blr ;" +#else +/* LR is always in r11, so we don't need a 'mflr r11' here */ +" mtctr 11 ;" +" mtlr 0 ;" +" bctr ;" #endif +" .size dummy_tramp, .-dummy_tramp ;" +" .popsection ;" +); + +void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx) +{ + int ool_stub_idx, long_branch_stub_idx; + + /* + * Out-of-line stub: + * mflr r0 + * [b|bl] tramp + * mtlr r0 // only with CONFIG_PPC_FTRACE_OUT_OF_LINE + * b bpf_func + 4 + */ + ool_stub_idx = ctx->idx; + EMIT(PPC_RAW_MFLR(_R0)); + EMIT(PPC_RAW_NOP()); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + EMIT(PPC_RAW_MTLR(_R0)); + WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4)); + EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4)); -int bpf_jit_enable __read_mostly; + /* + * Long branch stub: + * .long <dummy_tramp_addr> + * mflr r11 + * bcl 20,31,$+4 + * mflr r12 + * ld r12, -8-SZL(r12) + * mtctr r12 + * mtlr r11 // needed to retain ftrace ABI + * bctr + */ + if (image) + *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp; + ctx->idx += SZL / 4; + long_branch_stub_idx = ctx->idx; + EMIT(PPC_RAW_MFLR(_R11)); + EMIT(PPC_RAW_BCL4()); + EMIT(PPC_RAW_MFLR(_R12)); + EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL)); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_MTLR(_R11)); + EMIT(PPC_RAW_BCTR()); + if (!bpf_jit_ool_stub) { + bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4; + bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4; + } +} -static inline void bpf_flush_icache(void *start, void *end) +int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr) { - smp_wmb(); - flush_icache_range((unsigned long)start, (unsigned long)end); + if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) { + PPC_JMP(exit_addr); + } else if (ctx->alt_exit_addr) { + if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4)))) + return -1; + PPC_JMP(ctx->alt_exit_addr); + } else { + ctx->alt_exit_addr = ctx->idx * 4; + bpf_jit_build_epilogue(image, ctx); + } + + return 0; } -static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, - struct codegen_context *ctx) +struct powerpc_jit_data { + /* address of rw header */ + struct bpf_binary_header *hdr; + /* address of ro final header */ + struct bpf_binary_header *fhdr; + u32 *addrs; + u8 *fimage; + u32 proglen; + struct codegen_context ctx; +}; + +bool bpf_jit_needs_zext(void) { - int i; - const struct sock_filter *filter = fp->insns; - - if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { - /* Make stackframe */ - if (ctx->seen & SEEN_DATAREF) { - /* If we call any helpers (for loads), save LR */ - EMIT(PPC_INST_MFLR | __PPC_RT(R0)); - PPC_STD(0, 1, 16); - - /* Back up non-volatile regs. */ - PPC_STD(r_D, 1, -(8*(32-r_D))); - PPC_STD(r_HL, 1, -(8*(32-r_HL))); - } - if (ctx->seen & SEEN_MEM) { - /* - * Conditionally save regs r15-r31 as some will be used - * for M[] data. 
- */ - for (i = r_M; i < (r_M+16); i++) { - if (ctx->seen & (1 << (i-r_M))) - PPC_STD(i, 1, -(8*(32-i))); - } + return true; +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) +{ + u32 proglen; + u32 alloclen; + u8 *image = NULL; + u32 *code_base; + u32 *addrs; + struct powerpc_jit_data *jit_data; + struct codegen_context cgctx; + int pass; + int flen; + struct bpf_binary_header *fhdr = NULL; + struct bpf_binary_header *hdr = NULL; + struct bpf_prog *org_fp = fp; + struct bpf_prog *tmp_fp; + bool bpf_blinded = false; + bool extra_pass = false; + u8 *fimage = NULL; + u32 *fcode_base; + u32 extable_len; + u32 fixup_len; + + if (!fp->jit_requested) + return org_fp; + + tmp_fp = bpf_jit_blind_constants(org_fp); + if (IS_ERR(tmp_fp)) + return org_fp; + + if (tmp_fp != org_fp) { + bpf_blinded = true; + fp = tmp_fp; + } + + jit_data = fp->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + fp = org_fp; + goto out; } - EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | - (-BPF_PPC_STACKFRAME & 0xfffc)); + fp->aux->jit_data = jit_data; } - if (ctx->seen & SEEN_DATAREF) { + flen = fp->len; + addrs = jit_data->addrs; + if (addrs) { + cgctx = jit_data->ctx; /* - * If this filter needs to access skb data, - * prepare r_D and r_HL: - * r_HL = skb->len - skb->data_len - * r_D = skb->data + * JIT compiled to a writable location (image/code_base) first. + * It is then moved to the readonly final location (fimage/fcode_base) + * using instruction patching. */ - PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, - data_len)); - PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); - PPC_SUB(r_HL, r_HL, r_scratch1); - PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); + fimage = jit_data->fimage; + fhdr = jit_data->fhdr; + proglen = jit_data->proglen; + hdr = jit_data->hdr; + image = (void *)hdr + ((void *)fimage - (void *)fhdr); + extra_pass = true; + /* During extra pass, ensure index is reset before repopulating extable entries */ + cgctx.exentry_idx = 0; + goto skip_init_ctx; + } + + addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL); + if (addrs == NULL) { + fp = org_fp; + goto out_addrs; + } + + memset(&cgctx, 0, sizeof(struct codegen_context)); + bpf_jit_init_reg_mapping(&cgctx); + + /* Make sure that the stack is quadword aligned. */ + cgctx.stack_size = round_up(fp->aux->stack_depth, 16); + cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena); + cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena); + + /* Scouting faux-generate pass 0 */ + if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) { + /* We hit something illegal or unsupported. */ + fp = org_fp; + goto out_addrs; + } + + /* + * If we have seen a tail call, we need a second pass. + * This is because bpf_jit_emit_common_epilogue() is called + * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. + * We also need a second pass if we ended up with too large + * a program so as to ensure BPF_EXIT branches are in range. + */ + if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) { + cgctx.idx = 0; + if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) { + fp = org_fp; + goto out_addrs; + } + } + + bpf_jit_realloc_regs(&cgctx); + /* + * Pretend to build prologue, given the features we've seen. This will + * update ctgtx.idx as it pretends to output instructions, then we can + * calculate total size from idx. 
+ */ + bpf_jit_build_prologue(NULL, &cgctx); + addrs[fp->len] = cgctx.idx * 4; + bpf_jit_build_epilogue(NULL, &cgctx); + + fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4; + extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry); + + proglen = cgctx.idx * 4; + alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len; + + fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image, + bpf_jit_fill_ill_insns); + if (!fhdr) { + fp = org_fp; + goto out_addrs; } - if (ctx->seen & SEEN_XREG) { + if (extable_len) + fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len; + +skip_init_ctx: + code_base = (u32 *)(image + FUNCTION_DESCR_SIZE); + fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE); + + /* Code generation passes 1-2 */ + for (pass = 1; pass < 3; pass++) { + /* Now build the prologue, body code & epilogue for real. */ + cgctx.idx = 0; + cgctx.alt_exit_addr = 0; + bpf_jit_build_prologue(code_base, &cgctx); + if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass, + extra_pass)) { + bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size)); + bpf_jit_binary_pack_free(fhdr, hdr); + fp = org_fp; + goto out_addrs; + } + bpf_jit_build_epilogue(code_base, &cgctx); + + if (bpf_jit_enable > 1) + pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass, + proglen - (cgctx.idx * 4), cgctx.seen); + } + + if (bpf_jit_enable > 1) /* - * TODO: Could also detect whether first instr. sets X and - * avoid this (as below, with A). + * Note that we output the base address of the code_base + * rather than image, since opcodes are in code_base. */ - PPC_LI(r_X, 0); - } - - switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_CPU: - case BPF_S_ANC_QUEUE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - /* first instruction sets A register (or is RET 'constant') */ - break; - default: - /* make sure we dont leak kernel information to user */ - PPC_LI(r_A, 0); + bpf_jit_dump(flen, proglen, pass, code_base); + +#ifdef CONFIG_PPC64_ELF_ABI_V1 + /* Function descriptor nastiness: Address + TOC */ + ((u64 *)image)[0] = (u64)fcode_base; + ((u64 *)image)[1] = local_paca->kernel_toc; +#endif + + fp->bpf_func = (void *)fimage; + fp->jited = 1; + fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE; + + if (!fp->is_func || extra_pass) { + if (bpf_jit_binary_pack_finalize(fhdr, hdr)) { + fp = org_fp; + goto out_addrs; + } + bpf_prog_fill_jited_linfo(fp, addrs); +out_addrs: + kfree(addrs); + kfree(jit_data); + fp->aux->jit_data = NULL; + } else { + jit_data->addrs = addrs; + jit_data->ctx = cgctx; + jit_data->proglen = proglen; + jit_data->fimage = fimage; + jit_data->fhdr = fhdr; + jit_data->hdr = hdr; } + +out: + if (bpf_blinded) + bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp); + + return fp; } -static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +/* + * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling + * this function, as this only applies to BPF_PROBE_MEM, for now. 
+ */ +int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass, + struct codegen_context *ctx, int insn_idx, int jmp_off, + int dst_reg, u32 code) { - int i; + off_t offset; + unsigned long pc; + struct exception_table_entry *ex, *ex_entry; + u32 *fixup; - if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { - PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); - if (ctx->seen & SEEN_DATAREF) { - PPC_LD(0, 1, 16); - PPC_MTLR(0); - PPC_LD(r_D, 1, -(8*(32-r_D))); - PPC_LD(r_HL, 1, -(8*(32-r_HL))); - } - if (ctx->seen & SEEN_MEM) { - /* Restore any saved non-vol registers */ - for (i = r_M; i < (r_M+16); i++) { - if (ctx->seen & (1 << (i-r_M))) - PPC_LD(i, 1, -(8*(32-i))); - } - } - } - /* The RETs have left a return value in R3. */ + /* Populate extable entries only in the last pass */ + if (pass != 2) + return 0; + + if (!fp->aux->extable || + WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries)) + return -EINVAL; + + /* + * Program is first written to image before copying to the + * final location (fimage). Accordingly, update in the image first. + * As all offsets used are relative, copying as is to the + * final location should be alright. + */ + pc = (unsigned long)&image[insn_idx]; + ex = (void *)fp->aux->extable - (void *)fimage + (void *)image; + + fixup = (void *)ex - + (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) + + (ctx->exentry_idx * BPF_FIXUP_LEN * 4); + + fixup[0] = PPC_RAW_LI(dst_reg, 0); + if (BPF_CLASS(code) == BPF_ST || BPF_CLASS(code) == BPF_STX) + fixup[0] = PPC_RAW_NOP(); + + if (IS_ENABLED(CONFIG_PPC32)) + fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */ + + fixup[BPF_FIXUP_LEN - 1] = + PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]); - PPC_BLR(); + ex_entry = &ex[ctx->exentry_idx]; + + offset = pc - (long)&ex_entry->insn; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; + ex_entry->insn = offset; + + offset = (long)fixup - (long)&ex_entry->fixup; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; + ex_entry->fixup = offset; + + ctx->exentry_idx++; + return 0; } -#define CHOOSE_LOAD_FUNC(K, func) \ - ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) +void *bpf_arch_text_copy(void *dst, void *src, size_t len) +{ + int err; + + if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst))) + return ERR_PTR(-EINVAL); -/* Assemble the body code between the prologue & epilogue. */ -static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, - struct codegen_context *ctx, - unsigned int *addrs) + mutex_lock(&text_mutex); + err = patch_instructions(dst, src, len, false); + mutex_unlock(&text_mutex); + + return err ? ERR_PTR(err) : dst; +} + +int bpf_arch_text_invalidate(void *dst, size_t len) { - const struct sock_filter *filter = fp->insns; - int flen = fp->len; - u8 *func; - unsigned int true_cond; - int i; + u32 insn = BREAKPOINT_INSTRUCTION; + int ret; - /* Start of epilogue code */ - unsigned int exit_addr = addrs[flen]; + if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst))) + return -EINVAL; - for (i = 0; i < flen; i++) { - unsigned int K = filter[i].k; + mutex_lock(&text_mutex); + ret = patch_instructions(dst, &insn, len, true); + mutex_unlock(&text_mutex); + + return ret; +} + +void bpf_jit_free(struct bpf_prog *fp) +{ + if (fp->jited) { + struct powerpc_jit_data *jit_data = fp->aux->jit_data; + struct bpf_binary_header *hdr; /* - * addrs[] maps a BPF bytecode address into a real offset from - * the start of the body code. 
+ * If we fail the final pass of JIT (from jit_subprogs), + * the program may not be finalized yet. Call finalize here + * before freeing it. */ - addrs[i] = ctx->idx * 4; - - switch (filter[i].code) { - /*** ALU ops ***/ - case BPF_S_ALU_ADD_X: /* A += X; */ - ctx->seen |= SEEN_XREG; - PPC_ADD(r_A, r_A, r_X); - break; - case BPF_S_ALU_ADD_K: /* A += K; */ - if (!K) - break; - PPC_ADDI(r_A, r_A, IMM_L(K)); - if (K >= 32768) - PPC_ADDIS(r_A, r_A, IMM_HA(K)); - break; - case BPF_S_ALU_SUB_X: /* A -= X; */ - ctx->seen |= SEEN_XREG; - PPC_SUB(r_A, r_A, r_X); - break; - case BPF_S_ALU_SUB_K: /* A -= K */ - if (!K) - break; - PPC_ADDI(r_A, r_A, IMM_L(-K)); - if (K >= 32768) - PPC_ADDIS(r_A, r_A, IMM_HA(-K)); - break; - case BPF_S_ALU_MUL_X: /* A *= X; */ - ctx->seen |= SEEN_XREG; - PPC_MUL(r_A, r_A, r_X); - break; - case BPF_S_ALU_MUL_K: /* A *= K */ - if (K < 32768) - PPC_MULI(r_A, r_A, K); - else { - PPC_LI32(r_scratch1, K); - PPC_MUL(r_A, r_A, r_scratch1); - } - break; - case BPF_S_ALU_DIV_X: /* A /= X; */ - ctx->seen |= SEEN_XREG; - PPC_CMPWI(r_X, 0); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - /* - * Exit, returning 0; first pass hits here - * (longer worst-case code size). - */ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - PPC_LI(r_ret, 0); - PPC_JMP(exit_addr); - } - PPC_DIVWU(r_A, r_A, r_X); - break; - case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ - PPC_LI32(r_scratch1, K); - /* Top 32 bits of 64bit result -> A */ - PPC_MULHWU(r_A, r_A, r_scratch1); - break; - case BPF_S_ALU_AND_X: - ctx->seen |= SEEN_XREG; - PPC_AND(r_A, r_A, r_X); - break; - case BPF_S_ALU_AND_K: - if (!IMM_H(K)) - PPC_ANDI(r_A, r_A, K); - else { - PPC_LI32(r_scratch1, K); - PPC_AND(r_A, r_A, r_scratch1); - } - break; - case BPF_S_ALU_OR_X: - ctx->seen |= SEEN_XREG; - PPC_OR(r_A, r_A, r_X); - break; - case BPF_S_ALU_OR_K: - if (IMM_L(K)) - PPC_ORI(r_A, r_A, IMM_L(K)); - if (K >= 65536) - PPC_ORIS(r_A, r_A, IMM_H(K)); - break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: /* A ^= X */ - ctx->seen |= SEEN_XREG; - PPC_XOR(r_A, r_A, r_X); - break; - case BPF_S_ALU_XOR_K: /* A ^= K */ - if (IMM_L(K)) - PPC_XORI(r_A, r_A, IMM_L(K)); - if (K >= 65536) - PPC_XORIS(r_A, r_A, IMM_H(K)); - break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ - ctx->seen |= SEEN_XREG; - PPC_SLW(r_A, r_A, r_X); - break; - case BPF_S_ALU_LSH_K: - if (K == 0) - break; - else - PPC_SLWI(r_A, r_A, K); - break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ - ctx->seen |= SEEN_XREG; - PPC_SRW(r_A, r_A, r_X); - break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ - if (K == 0) - break; - else - PPC_SRWI(r_A, r_A, K); - break; - case BPF_S_ALU_NEG: - PPC_NEG(r_A, r_A); - break; - case BPF_S_RET_K: - PPC_LI32(r_ret, K); - if (!K) { - if (ctx->pc_ret0 == -1) - ctx->pc_ret0 = i; - } - /* - * If this isn't the very last instruction, branch to - * the epilogue if we've stuff to clean up. Otherwise, - * if there's nothing to tidy, just return. If we /are/ - * the last instruction, we're about to fall through to - * the epilogue to return. - */ - if (i != flen - 1) { - /* - * Note: 'seen' is properly valid only on pass - * #2. Both parts of this conditional are the - * same instruction size though, meaning the - * first pass will still correctly determine the - * code size/addresses. 
- */ - if (ctx->seen) - PPC_JMP(exit_addr); - else - PPC_BLR(); - } - break; - case BPF_S_RET_A: - PPC_MR(r_ret, r_A); - if (i != flen - 1) { - if (ctx->seen) - PPC_JMP(exit_addr); - else - PPC_BLR(); - } - break; - case BPF_S_MISC_TAX: /* X = A */ - PPC_MR(r_X, r_A); - break; - case BPF_S_MISC_TXA: /* A = X */ - ctx->seen |= SEEN_XREG; - PPC_MR(r_A, r_X); - break; - - /*** Constant loads/M[] access ***/ - case BPF_S_LD_IMM: /* A = K */ - PPC_LI32(r_A, K); - break; - case BPF_S_LDX_IMM: /* X = K */ - PPC_LI32(r_X, K); - break; - case BPF_S_LD_MEM: /* A = mem[K] */ - PPC_MR(r_A, r_M + (K & 0xf)); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_S_LDX_MEM: /* X = mem[K] */ - PPC_MR(r_X, r_M + (K & 0xf)); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_S_ST: /* mem[K] = A */ - PPC_MR(r_M + (K & 0xf), r_A); - ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_S_STX: /* mem[K] = X */ - PPC_MR(r_M + (K & 0xf), r_X); - ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); - break; - case BPF_S_LD_W_LEN: /* A = skb->len; */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); - break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ - PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); - break; - - /*** Ancillary info loads ***/ - - /* None of the BPF_S_ANC* codes appear to be passed by - * sk_chk_filter(). The interpreter and the x86 BPF - * compiler implement them so we do too -- they may be - * planted in future. - */ - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - protocol) != 2); - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - protocol)); - /* ntohs is a NOP with BE loads. */ - break; - case BPF_S_ANC_IFINDEX: - PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, - dev)); - PPC_CMPDI(r_scratch1, 0); - if (ctx->pc_ret0 != -1) { - PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); - } else { - /* Exit, returning 0; first pass hits here. 
*/ - PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - PPC_LI(r_ret, 0); - PPC_JMP(exit_addr); - } - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - ifindex) != 4); - PPC_LWZ_OFFS(r_A, r_scratch1, - offsetof(struct net_device, ifindex)); - break; - case BPF_S_ANC_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - mark)); - break; - case BPF_S_ANC_RXHASH: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); - PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - rxhash)); - break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - vlan_tci)); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) - PPC_ANDI(r_A, r_A, VLAN_VID_MASK); - else - PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); - break; - case BPF_S_ANC_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - queue_mapping) != 2); - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - queue_mapping)); - break; - case BPF_S_ANC_CPU: -#ifdef CONFIG_SMP - /* - * PACA ptr is r13: - * raw_smp_processor_id() = local_paca->paca_index - */ - BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, - paca_index) != 2); - PPC_LHZ_OFFS(r_A, 13, - offsetof(struct paca_struct, paca_index)); -#else - PPC_LI(r_A, 0); -#endif - break; - - /*** Absolute loads from packet header/data ***/ - case BPF_S_LD_W_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); - goto common_load; - case BPF_S_LD_H_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); - goto common_load; - case BPF_S_LD_B_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); - common_load: - /* Load from [K]. */ - ctx->seen |= SEEN_DATAREF; - PPC_LI64(r_scratch1, func); - PPC_MTLR(r_scratch1); - PPC_LI32(r_addr, K); - PPC_BLRL(); - /* - * Helper returns 'lt' condition on error, and an - * appropriate return value in r3 - */ - PPC_BCC(COND_LT, exit_addr); - break; - - /*** Indirect loads from packet header/data ***/ - case BPF_S_LD_W_IND: - func = sk_load_word; - goto common_load_ind; - case BPF_S_LD_H_IND: - func = sk_load_half; - goto common_load_ind; - case BPF_S_LD_B_IND: - func = sk_load_byte; - common_load_ind: - /* - * Load from [X + K]. Negative offsets are tested for - * in the helper functions. 
- */ - ctx->seen |= SEEN_DATAREF | SEEN_XREG; - PPC_LI64(r_scratch1, func); - PPC_MTLR(r_scratch1); - PPC_ADDI(r_addr, r_X, IMM_L(K)); - if (K >= 32768) - PPC_ADDIS(r_addr, r_addr, IMM_HA(K)); - PPC_BLRL(); - /* If error, cr0.LT set */ - PPC_BCC(COND_LT, exit_addr); - break; - - case BPF_S_LDX_B_MSH: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); - goto common_load; - break; - - /*** Jump and branches ***/ - case BPF_S_JMP_JA: - if (K != 0) - PPC_JMP(addrs[i + 1 + K]); - break; - - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - true_cond = COND_GT; - goto cond_branch; - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - true_cond = COND_GE; - goto cond_branch; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - true_cond = COND_EQ; - goto cond_branch; - case BPF_S_JMP_JSET_K: - case BPF_S_JMP_JSET_X: - true_cond = COND_NE; - /* Fall through */ - cond_branch: - /* same targets, can avoid doing the test :) */ - if (filter[i].jt == filter[i].jf) { - if (filter[i].jt > 0) - PPC_JMP(addrs[i + 1 + filter[i].jt]); - break; - } - - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: - ctx->seen |= SEEN_XREG; - PPC_CMPLW(r_A, r_X); - break; - case BPF_S_JMP_JSET_X: - ctx->seen |= SEEN_XREG; - PPC_AND_DOT(r_scratch1, r_A, r_X); - break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: - if (K < 32768) - PPC_CMPLWI(r_A, K); - else { - PPC_LI32(r_scratch1, K); - PPC_CMPLW(r_A, r_scratch1); - } - break; - case BPF_S_JMP_JSET_K: - if (K < 32768) - /* PPC_ANDI is /only/ dot-form */ - PPC_ANDI(r_scratch1, r_A, K); - else { - PPC_LI32(r_scratch1, K); - PPC_AND_DOT(r_scratch1, r_A, - r_scratch1); - } - break; - } - /* Sometimes branches are constructed "backward", with - * the false path being the branch and true path being - * a fallthrough to the next instruction. - */ - if (filter[i].jt == 0) - /* Swap the sense of the branch */ - PPC_BCC(true_cond ^ COND_CMP_TRUE, - addrs[i + 1 + filter[i].jf]); - else { - PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]); - if (filter[i].jf != 0) - PPC_JMP(addrs[i + 1 + filter[i].jf]); - } - break; - default: - /* The filter contains something cruel & unusual. - * We don't handle it, but also there shouldn't be - * anything missing from our list. - */ - if (printk_ratelimit()) - pr_err("BPF filter opcode %04x (@%d) unsupported\n", - filter[i].code, i); - return -ENOTSUPP; + if (jit_data) { + bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr); + kvfree(jit_data->addrs); + kfree(jit_data); } + hdr = bpf_jit_binary_pack_hdr(fp); + bpf_jit_binary_pack_free(hdr, NULL); + WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); + } + + bpf_prog_unlock_free(fp); +} + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} +bool bpf_jit_supports_arena(void) +{ + return IS_ENABLED(CONFIG_PPC64); +} + +bool bpf_jit_supports_far_kfunc_call(void) +{ + return IS_ENABLED(CONFIG_PPC64); +} + +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) +{ + if (!in_arena) + return true; + switch (insn->code) { + case BPF_STX | BPF_ATOMIC | BPF_H: + case BPF_STX | BPF_ATOMIC | BPF_B: + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + if (bpf_atomic_is_load_store(insn)) + return false; + return IS_ENABLED(CONFIG_PPC64); } - /* Set end-of-body-code address for exit. 
*/ - addrs[i] = ctx->idx * 4; + return true; +} +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +int arch_protect_bpf_trampoline(void *image, unsigned int size) +{ return 0; } -void bpf_jit_compile(struct sk_filter *fp) +static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ctx, + struct bpf_tramp_link *l, int regs_off, int retval_off, + int run_ctx_off, bool save_ret) { - unsigned int proglen; - unsigned int alloclen; - u32 *image = NULL; - u32 *code_base; - unsigned int *addrs; - struct codegen_context cgctx; - int pass; - int flen = fp->len; + struct bpf_prog *p = l->link.prog; + ppc_inst_t branch_insn; + u32 jmp_idx; + int ret = 0; - if (!bpf_jit_enable) - return; + /* Save cookie */ + if (IS_ENABLED(CONFIG_PPC64)) { + PPC_LI64(_R3, l->cookie); + EMIT(PPC_RAW_STD(_R3, _R1, run_ctx_off + offsetof(struct bpf_tramp_run_ctx, + bpf_cookie))); + } else { + PPC_LI32(_R3, l->cookie >> 32); + PPC_LI32(_R4, l->cookie); + EMIT(PPC_RAW_STW(_R3, _R1, + run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie))); + EMIT(PPC_RAW_STW(_R4, _R1, + run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie) + 4)); + } - addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); - if (addrs == NULL) - return; + /* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */ + PPC_LI_ADDR(_R3, p); + EMIT(PPC_RAW_MR(_R25, _R3)); + EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off)); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)bpf_trampoline_enter(p)); + if (ret) + return ret; + + /* Remember prog start time returned by __bpf_prog_enter */ + EMIT(PPC_RAW_MR(_R26, _R3)); /* - * There are multiple assembly passes as the generated code will change - * size as it settles down, figuring out the max branch offsets/exit - * paths required. - * - * The range of standard conditional branches is +/- 32Kbytes. Since - * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to - * finish with 8 bytes/instruction. Not feasible, so long jumps are - * used, distinct from short branches. + * if (__bpf_prog_enter(p) == 0) + * goto skip_exec_of_prog; * - * Current: - * - * For now, both branch types assemble to 2 words (short branches padded - * with a NOP); this is less efficient, but assembly will always complete - * after exactly 3 passes: - * - * First pass: No code buffer; Program is "faux-generated" -- no code - * emitted but maximum size of output determined (and addrs[] filled - * in). Also, we note whether we use M[], whether we use skb data, etc. - * All generation choices assumed to be 'worst-case', e.g. branches all - * far (2 instructions), return path code reduction not available, etc. - * - * Second pass: Code buffer allocated with size determined previously. - * Prologue generated to support features we have seen used. Exit paths - * determined and addrs[] is filled in again, as code may be slightly - * smaller as a result. 
+ * Emit a nop to be later patched with conditional branch, once offset is known + */ + EMIT(PPC_RAW_CMPLI(_R3, 0)); + jmp_idx = ctx->idx; + EMIT(PPC_RAW_NOP()); + + /* p->bpf_func(ctx) */ + EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off)); + if (!p->jited) + PPC_LI_ADDR(_R4, (unsigned long)p->insnsi); + /* Account for max possible instructions during dummy pass for size calculation */ + if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], + (unsigned long)p->bpf_func, + BRANCH_SET_LINK)) { + image[ctx->idx] = ppc_inst_val(branch_insn); + ctx->idx++; + } else { + EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func))); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); + } + + if (save_ret) + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + + /* Fix up branch */ + if (image) { + if (create_cond_branch(&branch_insn, &image[jmp_idx], + (unsigned long)&image[ctx->idx], COND_EQ << 16)) + return -EINVAL; + image[jmp_idx] = ppc_inst_val(branch_insn); + } + + /* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */ + EMIT(PPC_RAW_MR(_R3, _R25)); + EMIT(PPC_RAW_MR(_R4, _R26)); + EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off)); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)bpf_trampoline_exit(p)); + + return ret; +} + +static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context *ctx, + struct bpf_tramp_links *tl, int regs_off, int retval_off, + int run_ctx_off, u32 *branches) +{ + int i; + + /* + * The first fmod_ret program will receive a garbage return value. + * Set this to 0 to avoid confusing the program. + */ + EMIT(PPC_RAW_LI(_R3, 0)); + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + for (i = 0; i < tl->nr_links; i++) { + if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off, + run_ctx_off, true)) + return -EINVAL; + + /* + * mod_ret prog stored return value after prog ctx. Emit: + * if (*(u64 *)(ret_val) != 0) + * goto do_fexit; + */ + EMIT(PPC_RAW_LL(_R3, _R1, retval_off)); + EMIT(PPC_RAW_CMPLI(_R3, 0)); + + /* + * Save the location of the branch and generate a nop, which is + * replaced with a conditional jump once do_fexit (i.e. the + * start of the fexit invocation) is finalized. 
+ */ + branches[i] = ctx->idx; + EMIT(PPC_RAW_NOP()); + } + + return 0; +} + +static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int r4_off) +{ + if (IS_ENABLED(CONFIG_PPC64)) { + /* See bpf_jit_stack_tailcallcnt() */ + int tailcallcnt_offset = 7 * 8; + + EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); + EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset)); + } else { + /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */ + EMIT(PPC_RAW_LL(_R4, _R1, r4_off)); + } +} + +static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int r4_off) +{ + if (IS_ENABLED(CONFIG_PPC64)) { + /* See bpf_jit_stack_tailcallcnt() */ + int tailcallcnt_offset = 7 * 8; + + EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset)); + EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); + } else { + /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */ + EMIT(PPC_RAW_STL(_R4, _R1, r4_off)); + } +} + +static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, int func_frame_offset, + int nr_regs, int regs_off) +{ + int param_save_area_offset; + + param_save_area_offset = func_frame_offset; /* the two frames we alloted */ + param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ + + for (int i = 0; i < nr_regs; i++) { + if (i < 8) { + EMIT(PPC_RAW_STL(_R3 + i, _R1, regs_off + i * SZL)); + } else { + EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL)); + EMIT(PPC_RAW_STL(_R3, _R1, regs_off + i * SZL)); + } + } +} + +/* Used when restoring just the register parameters when returning back */ +static void bpf_trampoline_restore_args_regs(u32 *image, struct codegen_context *ctx, + int nr_regs, int regs_off) +{ + for (int i = 0; i < nr_regs && i < 8; i++) + EMIT(PPC_RAW_LL(_R3 + i, _R1, regs_off + i * SZL)); +} + +/* Used when we call into the traced function. 
Replicate parameter save area */ +static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int nr_regs, int regs_off) +{ + int param_save_area_offset; + + param_save_area_offset = func_frame_offset; /* the two frames we alloted */ + param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ + + for (int i = 8; i < nr_regs; i++) { + EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL)); + EMIT(PPC_RAW_STL(_R3, _R1, STACK_FRAME_MIN_SIZE + i * SZL)); + } + bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off); +} + +static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, + void *rw_image_end, void *ro_image, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, + void *func_addr) +{ + int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0; + int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct codegen_context codegen_ctx, *ctx; + u32 *image = (u32 *)rw_image; + ppc_inst_t branch_insn; + u32 *branches = NULL; + bool save_ret; + + if (IS_ENABLED(CONFIG_PPC32)) + return -EOPNOTSUPP; + + nr_regs = m->nr_args; + /* Extra registers for struct arguments */ + for (i = 0; i < m->nr_args; i++) + if (m->arg_size[i] > SZL) + nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1; + + if (nr_regs > MAX_BPF_FUNC_ARGS) + return -EOPNOTSUPP; + + ctx = &codegen_ctx; + memset(ctx, 0, sizeof(*ctx)); + + /* + * Generated stack layout: * - * Third pass: Code generated 'for real', and branch destinations - * determined from now-accurate addrs[] map. + * func prev back chain [ back chain ] + * [ ] + * bpf prog redzone/tailcallcnt [ ... ] 64 bytes (64-bit powerpc) + * [ ] -- + * LR save area [ r0 save (64-bit) ] | header + * [ r0 save (32-bit) ] | + * dummy frame for unwind [ back chain 1 ] -- + * [ padding ] align stack frame + * r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc + * alt_lr_off [ real lr (ool stub)] optional - actual lr + * [ r26 ] + * nvr_off [ r25 ] nvr save area + * retval_off [ return value ] + * [ reg argN ] + * [ ... ] + * regs_off [ reg_arg1 ] prog ctx context + * nregs_off [ args count ] + * ip_off [ traced function ] + * [ ... ] + * run_ctx_off [ bpf_tramp_run_ctx ] + * [ reg argN ] + * [ ... ] + * param_save_area [ reg_arg1 ] min 8 doublewords, per ABI + * [ TOC save (64-bit) ] -- + * [ LR save (64-bit) ] | header + * [ LR save (32-bit) ] | + * bpf trampoline frame [ back chain 2 ] -- * - * Ideal: + */ + + /* Minimum stack frame header */ + bpf_frame_size = STACK_FRAME_MIN_SIZE; + + /* + * Room for parameter save area. * - * If we optimise this, near branches will be shorter. On the - * first assembly pass, we should err on the side of caution and - * generate the biggest code. On subsequent passes, branches will be - * generated short or long and code size will reduce. With smaller - * code, more branches may fall into the short category, and code will - * reduce more. 
+ * As per the ABI, this is required if we call into the traced + * function (BPF_TRAMP_F_CALL_ORIG): + * - if the function takes more than 8 arguments for the rest to spill onto the stack + * - or, if the function has variadic arguments + * - or, if this functions's prototype was not available to the caller * - * Finally, if we see one pass generate code the same size as the - * previous pass we have converged and should now generate code for - * real. Allocating at the end will also save the memory that would - * otherwise be wasted by the (small) current code shrinkage. - * Preferably, we should do a small number of passes (e.g. 5) and if we - * haven't converged by then, get impatient and force code to generate - * as-is, even if the odd branch would be left long. The chances of a - * long jump are tiny with all but the most enormous of BPF filter - * inputs, so we should usually converge on the third pass. + * Reserve space for at least 8 registers for now. This can be optimized later. */ + bpf_frame_size += (nr_regs > 8 ? nr_regs : 8) * SZL; - cgctx.idx = 0; - cgctx.seen = 0; - cgctx.pc_ret0 = -1; - /* Scouting faux-generate pass 0 */ - if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) - /* We hit something illegal or unsupported. */ - goto out; + /* Room for struct bpf_tramp_run_ctx */ + run_ctx_off = bpf_frame_size; + bpf_frame_size += round_up(sizeof(struct bpf_tramp_run_ctx), SZL); + + /* Room for IP address argument */ + ip_off = bpf_frame_size; + if (flags & BPF_TRAMP_F_IP_ARG) + bpf_frame_size += SZL; + + /* Room for args count */ + nregs_off = bpf_frame_size; + bpf_frame_size += SZL; + + /* Room for args */ + regs_off = bpf_frame_size; + bpf_frame_size += nr_regs * SZL; + + /* Room for return value of func_addr or fentry prog */ + retval_off = bpf_frame_size; + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); + if (save_ret) + bpf_frame_size += SZL; + + /* Room for nvr save area */ + nvr_off = bpf_frame_size; + bpf_frame_size += 2 * SZL; + + /* Optional save area for actual LR in case of ool ftrace */ + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { + alt_lr_off = bpf_frame_size; + bpf_frame_size += SZL; + } + + if (IS_ENABLED(CONFIG_PPC32)) { + if (nr_regs < 2) { + r4_off = bpf_frame_size; + bpf_frame_size += SZL; + } else { + r4_off = regs_off + SZL; + } + } + + /* Padding to align stack frame, if any */ + bpf_frame_size = round_up(bpf_frame_size, SZL * 2); + + /* Dummy frame size for proper unwind - includes 64-bytes red zone for 64-bit powerpc */ + bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64; + + /* Offset to the traced function's stack frame */ + func_frame_offset = bpf_dummy_frame_size + bpf_frame_size; + + /* Create dummy frame for unwind, store original return value */ + EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF)); + /* Protect red zone where tail call count goes */ + EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size)); + + /* Create our stack frame */ + EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size)); + + /* 64-bit: Save TOC and load kernel TOC */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) { + EMIT(PPC_RAW_STD(_R2, _R1, 24)); + PPC64_LOAD_PACA(); + } + + /* 32-bit: save tail call count in r4 */ + if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2) + EMIT(PPC_RAW_STL(_R4, _R1, r4_off)); + + bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off); + + /* Save our return address */ + EMIT(PPC_RAW_MFLR(_R3)); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + EMIT(PPC_RAW_STL(_R3, _R1, 
alt_lr_off)); + else + EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); /* - * Pretend to build prologue, given the features we've seen. This will - * update ctgtx.idx as it pretends to output instructions, then we can - * calculate total size from idx. + * Save ip address of the traced function. + * We could recover this from LR, but we will need to address for OOL trampoline, + * and optional GEP area. */ - bpf_jit_build_prologue(fp, 0, &cgctx); - bpf_jit_build_epilogue(0, &cgctx); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) { + EMIT(PPC_RAW_LWZ(_R4, _R3, 4)); + EMIT(PPC_RAW_SLWI(_R4, _R4, 6)); + EMIT(PPC_RAW_SRAWI(_R4, _R4, 6)); + EMIT(PPC_RAW_ADD(_R3, _R3, _R4)); + EMIT(PPC_RAW_ADDI(_R3, _R3, 4)); + } - proglen = cgctx.idx * 4; - alloclen = proglen + FUNCTION_DESCR_SIZE; - image = module_alloc(alloclen); - if (!image) - goto out; + if (flags & BPF_TRAMP_F_IP_ARG) + EMIT(PPC_RAW_STL(_R3, _R1, ip_off)); - code_base = image + (FUNCTION_DESCR_SIZE/4); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + /* Fake our LR for unwind */ + EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); - /* Code generation passes 1-2 */ - for (pass = 1; pass < 3; pass++) { - /* Now build the prologue, body code & epilogue for real. */ - cgctx.idx = 0; - bpf_jit_build_prologue(fp, code_base, &cgctx); - bpf_jit_build_body(fp, code_base, &cgctx, addrs); - bpf_jit_build_epilogue(code_base, &cgctx); + /* Save function arg count -- see bpf_get_func_arg_cnt() */ + EMIT(PPC_RAW_LI(_R3, nr_regs)); + EMIT(PPC_RAW_STL(_R3, _R1, nregs_off)); - if (bpf_jit_enable > 1) - pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass, - proglen - (cgctx.idx * 4), cgctx.seen); + /* Save nv regs */ + EMIT(PPC_RAW_STL(_R25, _R1, nvr_off)); + EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL)); + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + PPC_LI_ADDR(_R3, (unsigned long)im); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)__bpf_tramp_enter); + if (ret) + return ret; } - if (bpf_jit_enable > 1) - /* Note that we output the base address of the code_base - * rather than image, since opcodes are in code_base. 
+ for (i = 0; i < fentry->nr_links; i++) + if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off, + run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET)) + return -EINVAL; + + if (fmod_ret->nr_links) { + branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL); + if (!branches) + return -ENOMEM; + + if (invoke_bpf_mod_ret(image, ro_image, ctx, fmod_ret, regs_off, retval_off, + run_ctx_off, branches)) { + ret = -EINVAL; + goto cleanup; + } + } + + /* Call the traced function */ + if (flags & BPF_TRAMP_F_CALL_ORIG) { + /* + * The address in LR save area points to the correct point in the original function + * with both PPC_FTRACE_OUT_OF_LINE as well as with traditional ftrace instruction + * sequence */ - bpf_jit_dump(flen, proglen, pass, code_base); + EMIT(PPC_RAW_LL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTCTR(_R3)); - if (image) { - bpf_flush_icache(code_base, code_base + (proglen/4)); - /* Function descriptor nastiness: Address + TOC */ - ((u64 *)image)[0] = (u64)code_base; - ((u64 *)image)[1] = local_paca->kernel_toc; - fp->bpf_func = (void *)image; + /* Replicate tail_call_cnt before calling the original BPF prog */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off); + + /* Restore args */ + bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off); + + /* Restore TOC for 64-bit */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + EMIT(PPC_RAW_LD(_R2, _R1, 24)); + EMIT(PPC_RAW_BCTRL()); + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + PPC64_LOAD_PACA(); + + /* Store return value for bpf prog to access */ + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + + /* Restore updated tail_call_cnt */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off); + + /* Reserve space to patch branch instruction to skip fexit progs */ + if (ro_image) /* image is NULL for dummy pass */ + im->ip_after_call = &((u32 *)ro_image)[ctx->idx]; + EMIT(PPC_RAW_NOP()); + } + + /* Update branches saved in invoke_bpf_mod_ret with address of do_fexit */ + for (i = 0; i < fmod_ret->nr_links && image; i++) { + if (create_cond_branch(&branch_insn, &image[branches[i]], + (unsigned long)&image[ctx->idx], COND_NE << 16)) { + ret = -EINVAL; + goto cleanup; + } + + image[branches[i]] = ppc_inst_val(branch_insn); + } + + for (i = 0; i < fexit->nr_links; i++) + if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off, + run_ctx_off, false)) { + ret = -EINVAL; + goto cleanup; + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + if (ro_image) /* image is NULL for dummy pass */ + im->ip_epilogue = &((u32 *)ro_image)[ctx->idx]; + PPC_LI_ADDR(_R3, im); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)__bpf_tramp_exit); + if (ret) + goto cleanup; } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off); + + /* Restore return value of func_addr or fentry prog */ + if (save_ret) + EMIT(PPC_RAW_LL(_R3, _R1, retval_off)); + + /* Restore nv regs */ + EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL)); + EMIT(PPC_RAW_LL(_R25, _R1, nvr_off)); + + /* Epilogue */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + EMIT(PPC_RAW_LD(_R2, _R1, 24)); + if (flags & BPF_TRAMP_F_SKIP_FRAME) { + /* Skip the traced function and return to parent */ + 
EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_BLR()); + } else { + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { + EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_BLR()); + } else { + EMIT(PPC_RAW_LL(_R0, _R1, bpf_frame_size + PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTCTR(_R0)); + EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_BCTR()); + } + } + + /* Make sure the trampoline generation logic doesn't overflow */ + if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) { + ret = -EFAULT; + goto cleanup; + } + ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4; + +cleanup: + kfree(branches); + return ret; +} + +int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, void *func_addr) +{ + struct bpf_tramp_image im; + int ret; + + ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr); + return ret; +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, + void *func_addr) +{ + u32 size = image_end - image; + void *rw_image, *tmp; + int ret; + + /* + * rw_image doesn't need to be in module memory range, so we can + * use kvmalloc. + */ + rw_image = kvmalloc(size, GFP_KERNEL); + if (!rw_image) + return -ENOMEM; + + ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, + flags, tlinks, func_addr); + if (ret < 0) + goto out; + + if (bpf_jit_enable > 1) + bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image); + + tmp = bpf_arch_text_copy(image, rw_image, size); + if (IS_ERR(tmp)) + ret = PTR_ERR(tmp); + out: - kfree(addrs); - return; + kvfree(rw_image); + return ret; } -void bpf_jit_free(struct sk_filter *fp) +static int bpf_modify_inst(void *ip, ppc_inst_t old_inst, ppc_inst_t new_inst) { - if (fp->bpf_func != sk_run_filter) - module_free(NULL, fp->bpf_func); + ppc_inst_t org_inst; + + if (copy_inst_from_kernel_nofault(&org_inst, ip)) { + pr_err("0x%lx: fetching instruction failed\n", (unsigned long)ip); + return -EFAULT; + } + + if (!ppc_inst_equal(org_inst, old_inst)) { + pr_err("0x%lx: expected (%08lx) != found (%08lx)\n", + (unsigned long)ip, ppc_inst_as_ulong(old_inst), ppc_inst_as_ulong(org_inst)); + return -EINVAL; + } + + if (ppc_inst_equal(old_inst, new_inst)) + return 0; + + return patch_instruction(ip, new_inst); +} + +static void do_isync(void *info __maybe_unused) +{ + isync(); +} + +/* + * A 3-step process for bpf prog entry: + * 1. At bpf prog entry, a single nop/b: + * bpf_func: + * [nop|b] ool_stub + * 2. Out-of-line stub: + * ool_stub: + * mflr r0 + * [b|bl] <bpf_prog>/<long_branch_stub> + * mtlr r0 // CONFIG_PPC_FTRACE_OUT_OF_LINE only + * b bpf_func + 4 + * 3. Long branch stub: + * long_branch_stub: + * .long <branch_addr>/<dummy_tramp> + * mflr r11 + * bcl 20,31,$+4 + * mflr r12 + * ld r12, -16(r12) + * mtctr r12 + * mtlr r11 // needed to retain ftrace ABI + * bctr + * + * dummy_tramp is used to reduce synchronization requirements. + * + * When attaching a bpf trampoline to a bpf prog, we do not need any + * synchronization here since we always have a valid branch target regardless + * of the order in which the above stores are seen. 
dummy_tramp ensures that + * the long_branch stub goes to a valid destination on other cpus, even when + * the branch to the long_branch stub is seen before the updated trampoline + * address. + * + * However, when detaching a bpf trampoline from a bpf prog, or if changing + * the bpf trampoline address, we need synchronization to ensure that other + * cpus can no longer branch into the older trampoline so that it can be + * safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus + * make forward progress, but we still need to ensure that other cpus + * execute isync (or some CSI) so that they don't go back into the + * trampoline again. + */ +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t, + enum bpf_text_poke_type new_t, void *old_addr, + void *new_addr) +{ + unsigned long bpf_func, bpf_func_end, size, offset; + ppc_inst_t old_inst, new_inst; + int ret = 0, branch_flags; + char name[KSYM_NAME_LEN]; + + if (IS_ENABLED(CONFIG_PPC32)) + return -EOPNOTSUPP; + + bpf_func = (unsigned long)ip; + + /* We currently only support poking bpf programs */ + if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) { + pr_err("%s (0x%lx): kernel/modules are not supported\n", __func__, bpf_func); + return -EOPNOTSUPP; + } + + /* + * If we are not poking at bpf prog entry, then we are simply patching in/out + * an unconditional branch instruction at im->ip_after_call + */ + if (offset) { + if (old_t == BPF_MOD_CALL || new_t == BPF_MOD_CALL) { + pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__, + bpf_func); + return -EOPNOTSUPP; + } + old_inst = ppc_inst(PPC_RAW_NOP()); + if (old_addr) + if (create_branch(&old_inst, ip, (unsigned long)old_addr, 0)) + return -ERANGE; + new_inst = ppc_inst(PPC_RAW_NOP()); + if (new_addr) + if (create_branch(&new_inst, ip, (unsigned long)new_addr, 0)) + return -ERANGE; + mutex_lock(&text_mutex); + ret = bpf_modify_inst(ip, old_inst, new_inst); + mutex_unlock(&text_mutex); + + /* Make sure all cpus see the new instruction */ + smp_call_function(do_isync, NULL, 1); + return ret; + } + + bpf_func_end = bpf_func + size; + + /* Address of the jmp/call instruction in the out-of-line stub */ + ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4); + + if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) { + pr_err("%s (0x%lx): bpf prog too large, ool stub out of branch range\n", __func__, + bpf_func); + return -ERANGE; + } + + old_inst = ppc_inst(PPC_RAW_NOP()); + branch_flags = old_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0; + if (old_addr) { + if (is_offset_in_branch_range(ip - old_addr)) + create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags); + else + create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, + branch_flags); + } + new_inst = ppc_inst(PPC_RAW_NOP()); + branch_flags = new_t == BPF_MOD_CALL ? BRANCH_SET_LINK : 0; + if (new_addr) { + if (is_offset_in_branch_range(ip - new_addr)) + create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags); + else + create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, + branch_flags); + } + + mutex_lock(&text_mutex); + + /* + * 1. Update the address in the long branch stub: + * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr + * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here. 
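As a rough C sketch of the branch-target selection described in the comment above and implemented just below (illustrative only: pick_ool_branch() is an invented name, while is_offset_in_branch_range(), create_branch(), BRANCH_SET_LINK and dummy_tramp are the real symbols used by this hunk):

    /*
     * Choose the immediate target for the branch patched into the
     * out-of-line stub at 'ip'. Far targets go via the long branch stub,
     * whose data word is patched separately (to the trampoline address,
     * or back to dummy_tramp when detaching).
     */
    static int pick_ool_branch(ppc_inst_t *inst, void *ip, unsigned long target,
                               unsigned long long_branch_stub, bool is_call)
    {
            int flags = is_call ? BRANCH_SET_LINK : 0;

            if (is_offset_in_branch_range((long)target - (long)ip))
                    return create_branch(inst, ip, target, flags);

            return create_branch(inst, ip, long_branch_stub, flags);
    }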
+ */ + if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) || + (old_addr && !is_offset_in_branch_range(old_addr - ip))) + ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL), + (new_addr && !is_offset_in_branch_range(new_addr - ip)) ? + (unsigned long)new_addr : (unsigned long)dummy_tramp); + if (ret) + goto out; + + /* 2. Update the branch/call in the out-of-line stub */ + ret = bpf_modify_inst(ip, old_inst, new_inst); + if (ret) + goto out; + + /* 3. Update instruction at bpf prog entry */ + ip = (void *)bpf_func; + if (!old_addr || !new_addr) { + if (!old_addr) { + old_inst = ppc_inst(PPC_RAW_NOP()); + create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); + } else { + new_inst = ppc_inst(PPC_RAW_NOP()); + create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); + } + ret = bpf_modify_inst(ip, old_inst, new_inst); + } + +out: + mutex_unlock(&text_mutex); + + /* + * Sync only if we are not attaching a trampoline to a bpf prog so the older + * trampoline can be freed safely. + */ + if (old_addr) + smp_call_function(do_isync, NULL, 1); + + return ret; } diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c new file mode 100644 index 000000000000..3087e744fb25 --- /dev/null +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -0,0 +1,1388 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * eBPF JIT compiler for PPC32 + * + * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu> + * CS GROUP France + * + * Based on PPC64 eBPF JIT compiler by Naveen N. Rao + */ +#include <linux/moduleloader.h> +#include <asm/cacheflush.h> +#include <asm/asm-compat.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/if_vlan.h> +#include <asm/kprobes.h> +#include <linux/bpf.h> + +#include "bpf_jit.h" + +/* + * Stack layout: + * + * [ prev sp ] <------------- + * [ nv gpr save area ] 16 * 4 | + * fp (r31) --> [ ebpf stack space ] upto 512 | + * [ frame header ] 16 | + * sp (r1) ---> [ stack pointer ] -------------- + */ + +/* for gpr non volatile registers r17 to r31 (14) + tail call */ +#define BPF_PPC_STACK_SAVE (15 * 4 + 4) +/* stack frame, ensure this is quadword aligned */ +#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size) + +#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? 
-1 : 0)) + +/* PPC NVR range -- update this if we ever use NVRs below r17 */ +#define BPF_PPC_NVR_MIN _R17 +#define BPF_PPC_TC _R16 + +/* BPF register usage */ +#define TMP_REG (MAX_BPF_JIT_REG + 0) + +/* BPF to ppc register mappings */ +void bpf_jit_init_reg_mapping(struct codegen_context *ctx) +{ + /* function return value */ + ctx->b2p[BPF_REG_0] = _R12; + /* function arguments */ + ctx->b2p[BPF_REG_1] = _R4; + ctx->b2p[BPF_REG_2] = _R6; + ctx->b2p[BPF_REG_3] = _R8; + ctx->b2p[BPF_REG_4] = _R10; + ctx->b2p[BPF_REG_5] = _R22; + /* non volatile registers */ + ctx->b2p[BPF_REG_6] = _R24; + ctx->b2p[BPF_REG_7] = _R26; + ctx->b2p[BPF_REG_8] = _R28; + ctx->b2p[BPF_REG_9] = _R30; + /* frame pointer aka BPF_REG_10 */ + ctx->b2p[BPF_REG_FP] = _R18; + /* eBPF jit internal registers */ + ctx->b2p[BPF_REG_AX] = _R20; + ctx->b2p[TMP_REG] = _R31; /* 32 bits */ +} + +static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) +{ + if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC) + return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg); + + WARN(true, "BPF JIT is asking about unknown registers, will crash the stack"); + /* Use the hole we have left for alignment */ + return BPF_PPC_STACKFRAME(ctx) - 4; +} + +#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */ +#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */ +#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */ + +static inline bool bpf_has_stack_frame(struct codegen_context *ctx) +{ + /* + * We only need a stack frame if: + * - we call other functions (kernel helpers), or + * - we use non volatile registers, or + * - we use tail call counter + * - the bpf program uses its stack area + * The latter condition is deduced from the usage of BPF_REG_FP + */ + return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) || + bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)); +} + +void bpf_jit_realloc_regs(struct codegen_context *ctx) +{ + unsigned int nvreg_mask; + + if (ctx->seen & SEEN_FUNC) + nvreg_mask = SEEN_NVREG_TEMP_MASK; + else + nvreg_mask = SEEN_NVREG_FULL_MASK; + + while (ctx->seen & nvreg_mask && + (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) { + int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab)); + int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa)); + int i; + + for (i = BPF_REG_0; i <= TMP_REG; i++) { + if (ctx->b2p[i] != old) + continue; + ctx->b2p[i] = new; + bpf_set_seen_register(ctx, new); + bpf_clear_seen_register(ctx, old); + if (i != TMP_REG) { + bpf_set_seen_register(ctx, new - 1); + bpf_clear_seen_register(ctx, old - 1); + } + break; + } + } +} + +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* Instruction for trampoline attach */ + EMIT(PPC_RAW_NOP()); + + /* Initialize tail_call_cnt, to be skipped if we do tail calls. */ + if (ctx->seen & SEEN_TAILCALL) + EMIT(PPC_RAW_LI(_R4, 0)); + else + EMIT(PPC_RAW_NOP()); + +#define BPF_TAILCALL_PROLOGUE_SIZE 8 + + if (bpf_has_stack_frame(ctx)) + EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx))); + + if (ctx->seen & SEEN_TAILCALL) + EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); + + /* First arg comes in as a 32 bits pointer. 
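bpf_jit_realloc_regs() above only remaps registers that the prologue would otherwise have to save, and it relies on each GPR being tracked as a single bit of ctx->seen. A rough sketch of that encoding, assuming the 1 << (31 - reg) convention implied by the SEEN_*_MASK values (the helper below is invented for illustration):

    /* Illustrative: bit used for GPR 'reg' inside ctx->seen. */
    static inline unsigned int seen_bit(int reg)
    {
            return 1u << (31 - reg);    /* r3 -> bit 28, ..., r31 -> bit 0 */
    }

    /* SEEN_VREG_MASK       0x1ff80000 : bits for r3..r12  (volatile)     */
    /* SEEN_NVREG_FULL_MASK 0x0003ffff : bits for r14..r31 (non-volatile) */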
*/ + EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3)); + EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0)); + + /* + * We need a stack frame, but we don't necessarily need to + * save/restore LR unless we call other functions + */ + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_MFLR(_R0)); + + /* + * Back up non-volatile regs -- registers r18-r31 + */ + for (i = BPF_PPC_NVR_MIN; i <= 31; i++) + if (bpf_is_seen_register(ctx, i)) + EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i))); + + /* Setup frame pointer to point to the bpf stack area */ + if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) { + EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0)); + EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1, + STACK_FRAME_MIN_SIZE + ctx->stack_size)); + } + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); +} + +static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* Restore NVRs */ + for (i = BPF_PPC_NVR_MIN; i <= 31; i++) + if (bpf_is_seen_register(ctx, i)) + EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i))); + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); + + /* Tear down our stack frame */ + if (bpf_has_stack_frame(ctx)) + EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx))); + + if (ctx->seen & SEEN_FUNC) + EMIT(PPC_RAW_MTLR(_R0)); + +} + +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +{ + EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0))); + + bpf_jit_emit_common_epilogue(image, ctx); + + EMIT(PPC_RAW_BLR()); + + bpf_jit_build_fentry_stubs(image, ctx); +} + +/* Relative offset needs to be calculated based on final image location */ +int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) +{ + s32 rel = (s32)func - (s32)(fimage + ctx->idx); + + if (image && rel < 0x2000000 && rel >= -0x2000000) { + EMIT(PPC_RAW_BL(rel)); + } else { + /* Load function address into r0 */ + EMIT(PPC_RAW_LIS(_R0, IMM_H(func))); + EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func))); + EMIT(PPC_RAW_MTCTR(_R0)); + EMIT(PPC_RAW_BCTRL()); + } + + return 0; +} + +static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) +{ + /* + * By now, the eBPF program has already setup parameters in r3-r6 + * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program + * r5-r6/BPF_REG_2 - pointer to bpf_array + * r7-r8/BPF_REG_3 - index in bpf_array + */ + int b2p_bpf_array = bpf_to_ppc(BPF_REG_2); + int b2p_index = bpf_to_ppc(BPF_REG_3); + + /* + * if (index >= array->map.max_entries) + * goto out; + */ + EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries))); + EMIT(PPC_RAW_CMPLW(b2p_index, _R0)); + EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); + PPC_BCC_SHORT(COND_GE, out); + + /* + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) + * goto out; + */ + EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT)); + /* tail_call_cnt++; */ + EMIT(PPC_RAW_ADDIC(_R0, _R0, 1)); + PPC_BCC_SHORT(COND_GE, out); + + /* prog = array->ptrs[index]; */ + EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29)); + EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array)); + EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs))); + + /* + * if (prog == NULL) + * goto out; + */ + EMIT(PPC_RAW_CMPLWI(_R3, 0)); + PPC_BCC_SHORT(COND_EQ, out); + + /* goto *(prog->bpf_func + prologue_size); */ + EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func))); + EMIT(PPC_RAW_ADDIC(_R3, _R3, 
BPF_TAILCALL_PROLOGUE_SIZE)); + EMIT(PPC_RAW_MTCTR(_R3)); + + EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1))); + + /* Put tail_call_cnt in r4 */ + EMIT(PPC_RAW_MR(_R4, _R0)); + + /* tear restore NVRs, ... */ + bpf_jit_emit_common_epilogue(image, ctx); + + EMIT(PPC_RAW_BCTR()); + + /* out: */ + return 0; +} + +/* Assemble the body code between the prologue & epilogue */ +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass) +{ + const struct bpf_insn *insn = fp->insnsi; + int flen = fp->len; + int i, ret; + + /* Start of epilogue code - will only be valid 2nd pass onwards */ + u32 exit_addr = addrs[flen]; + + for (i = 0; i < flen; i++) { + u32 code = insn[i].code; + u32 prevcode = i ? insn[i - 1].code : 0; + u32 dst_reg = bpf_to_ppc(insn[i].dst_reg); + u32 dst_reg_h = dst_reg - 1; + u32 src_reg = bpf_to_ppc(insn[i].src_reg); + u32 src_reg_h = src_reg - 1; + u32 src2_reg = dst_reg; + u32 src2_reg_h = dst_reg_h; + u32 ax_reg = bpf_to_ppc(BPF_REG_AX); + u32 tmp_reg = bpf_to_ppc(TMP_REG); + u32 size = BPF_SIZE(code); + u32 save_reg, ret_reg; + s16 off = insn[i].off; + s32 imm = insn[i].imm; + bool func_addr_fixed; + u64 func_addr; + u32 true_cond; + u32 tmp_idx; + + if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) && + (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) && + BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X && + insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) { + src2_reg = bpf_to_ppc(insn[i - 1].src_reg); + src2_reg_h = src2_reg - 1; + ctx->idx = addrs[i - 1] / 4; + } + + /* + * addrs[] maps a BPF bytecode address into a real offset from + * the start of the body code. + */ + addrs[i] = ctx->idx * 4; + + /* + * As an optimization, we note down which registers + * are used so that we can only save/restore those in our + * prologue and epilogue. We do this here regardless of whether + * the actual BPF instruction uses src/dst registers or not + * (for instance, BPF_CALL does not use them). The expectation + * is that those instructions will have src_reg/dst_reg set to + * 0. Even otherwise, we just lose some prologue/epilogue + * optimization but everything else should work without + * any issues. 
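The instruction sequence emitted by bpf_jit_emit_tail_call() earlier in this file corresponds to the following C-level logic -- a sketch of the eBPF tail-call semantics it implements, not code that exists in the file:

    /* Conceptual equivalent of the emitted tail-call sequence. */
    if (index >= array->map.max_entries)
            goto out;                       /* fall through to next insn */
    if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
            goto out;                       /* bound the chain of calls  */
    tail_call_cnt++;
    prog = array->ptrs[index];
    if (!prog)
            goto out;
    /* Jump past the callee's prologue so tail_call_cnt (kept in r4) survives. */
    goto *(prog->bpf_func + BPF_TAILCALL_PROLOGUE_SIZE);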
+ */ + if (dst_reg >= 3 && dst_reg < 32) { + bpf_set_seen_register(ctx, dst_reg); + bpf_set_seen_register(ctx, dst_reg_h); + } + + if (src_reg >= 3 && src_reg < 32) { + bpf_set_seen_register(ctx, src_reg); + bpf_set_seen_register(ctx, src_reg_h); + } + + switch (code) { + /* + * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG + */ + case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */ + EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */ + EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ + EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */ + EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg)); + EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h)); + break; + case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ + imm = -imm; + fallthrough; + case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ + if (!imm) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + } else if (IMM_HA(imm) & 0xffff) { + EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm))); + src2_reg = dst_reg; + } + if (IMM_L(imm)) + EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm))); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */ + imm = -imm; + fallthrough; + case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */ + if (!imm) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); + break; + } + if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm)); + } else { + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0)); + } + if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000)) + EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h)); + else + EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h)); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h)); + EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg)); + EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg)); + break; + case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ + EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */ + if (imm == 1) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + } else if (imm == -1) { + EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); + } else if (is_power_of_2((u32)imm)) { + EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm))); + } else if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm)); + } else { + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0)); + } + break; + case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */ + if (!imm) { + PPC_LI32(dst_reg, 0); + PPC_LI32(dst_reg_h, 0); + } else if (imm == 1) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); + } else if (imm == -1) { + EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); + } else if (imm > 0 && is_power_of_2(imm)) { + imm = ilog2(imm); + EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm)); + EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31)); + EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm)); + } else { + bpf_set_seen_register(ctx, tmp_reg); + PPC_LI32(tmp_reg, 
imm); + EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg)); + if (imm < 0) + EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg)); + EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg)); + EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg)); + EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0)); + } + break; + case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ + if (off) + EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg)); + EMIT(PPC_RAW_MULW(_R0, src_reg, _R0)); + EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ + return -EOPNOTSUPP; + case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ + return -EOPNOTSUPP; + case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ + if (!imm) + return -EINVAL; + if (imm == 1) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + } else if (is_power_of_2((u32)imm)) { + if (off) + EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm))); + else + EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm))); + } else { + PPC_LI32(_R0, imm); + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0)); + } + break; + case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ + if (!imm) + return -EINVAL; + + if (!is_power_of_2((u32)imm)) { + bpf_set_seen_register(ctx, tmp_reg); + PPC_LI32(tmp_reg, imm); + if (off) + EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg)); + else + EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg)); + EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0)); + EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); + } else if (imm == 1) { + EMIT(PPC_RAW_LI(dst_reg, 0)); + } else if (off) { + EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm))); + EMIT(PPC_RAW_ADDZE(_R0, _R0)); + EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm))); + EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); + } else { + imm = ilog2((u32)imm); + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31)); + } + break; + case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */ + if (!imm) + return -EINVAL; + if (imm < 0) + imm = -imm; + if (!is_power_of_2(imm)) + return -EOPNOTSUPP; + if (imm == 1) { + EMIT(PPC_RAW_LI(dst_reg, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else if (off) { + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); + EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h)); + EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31)); + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h)); + EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); + EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h)); + } else { + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } + break; + case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ + if (!imm) + return -EINVAL; + if (!is_power_of_2(abs(imm))) + return -EOPNOTSUPP; + + if (imm < 0) { + EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); + imm = -imm; + src2_reg = dst_reg; + } + if (imm == 1) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); + } else { + imm = ilog2(imm); + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm)); + } + break; + case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */ + 
EMIT(PPC_RAW_NEG(dst_reg, src2_reg)); + break; + case BPF_ALU64 | BPF_NEG: /* dst = -dst */ + EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0)); + EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h)); + break; + + /* + * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH + */ + case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ + EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */ + EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ + if (imm >= 0) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + fallthrough; + case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */ + if (!IMM_H(imm)) { + EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm))); + } else if (!IMM_L(imm)) { + EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm))); + } else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(i) - 1)) - 1))) { + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, + 32 - fls(imm), 32 - ffs(imm))); + } else { + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0)); + } + break; + case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ + EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h)); + break; + case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ + EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */ + /* Sign-extended */ + if (imm < 0) + EMIT(PPC_RAW_LI(dst_reg_h, -1)); + fallthrough; + case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */ + if (IMM_L(imm)) { + EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm))); + src2_reg = dst_reg; + } + if (IMM_H(imm)) + EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm))); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */ + if (dst_reg == src_reg) { + EMIT(PPC_RAW_LI(dst_reg, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else { + EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h)); + } + break; + case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */ + if (dst_reg == src_reg) + EMIT(PPC_RAW_LI(dst_reg, 0)); + else + EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */ + if (imm < 0) + EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h)); + fallthrough; + case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */ + if (IMM_L(imm)) { + EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm))); + src2_reg = dst_reg; + } + if (IMM_H(imm)) + EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm))); + break; + case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ + EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); + EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0)); + EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0)); + EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg)); + break; + case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */ + if (imm) + EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm)); + else + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) { + EMIT(PPC_RAW_MR(dst_reg, 
src2_reg)); + } else if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm)); + EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31)); + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm)); + } else if (imm < 64) { + EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm)); + EMIT(PPC_RAW_LI(dst_reg, 0)); + } else { + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + EMIT(PPC_RAW_LI(dst_reg, 0)); + } + break; + case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ + EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); + EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0)); + EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0)); + EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); + break; + case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ + if (imm) + EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm)); + else + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + break; + case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); + } else if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31)); + } else if (imm < 64) { + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else { + EMIT(PPC_RAW_LI(dst_reg, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } + break; + case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ + EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg)); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32)); + EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg)); + EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0)); + EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0)); + EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26)); + EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg)); + EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg)); + EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg)); + break; + case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ + if (imm) + EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm)); + else + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ + if (imm < 0) + return -EINVAL; + if (!imm) { + EMIT(PPC_RAW_MR(dst_reg, src2_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h)); + } else if (imm < 32) { + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31)); + EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm)); + } else if (imm < 64) { + EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); + } else { + EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); + } + break; + + /* + * MOV + */ + case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ + if (off == 8) { + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); + } else if (off == 16) { + 
EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); + } else if (off == 32 && dst_reg == src_reg) { + EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); + } else if (off == 32) { + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); + } else if (dst_reg != src_reg) { + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h)); + } + break; + case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ + /* special mov32 for zext */ + if (imm == 1) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + else if (off == 8) + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + else if (off == 16) + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); + else if (dst_reg != src_reg) + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ + PPC_LI32(dst_reg, imm); + PPC_EX32(dst_reg_h, imm); + break; + case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ + PPC_LI32(dst_reg, imm); + break; + + /* + * BPF_FROM_BE/LE + */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: + /* Copy 16 bits to upper part */ + EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15)); + /* Rotate 8 bits right & mask */ + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31)); + break; + case 32: + /* + * Rotate word left by 8 bits: + * 2 bytes are already in their final position + * -- byte 2 and 4 (of bytes 1, 2, 3 and 4) + */ + EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31)); + /* Rotate 24 bits and insert byte 1 */ + EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7)); + /* Rotate 24 bits and insert byte 3 */ + EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, _R0)); + break; + case 64: + bpf_set_seen_register(ctx, tmp_reg); + EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31)); + EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31)); + /* Rotate 24 bits and insert byte 1 */ + EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7)); + EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7)); + /* Rotate 24 bits and insert byte 3 */ + EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23)); + EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, _R0)); + EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg)); + break; + } + if (BPF_CLASS(code) == BPF_ALU64 && imm != 64) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + break; + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + /* zero-extend 16 bits into 32 bits */ + EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31)); + break; + case 32: + case 64: + /* nop */ + break; + } + break; + + /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + + /* + * BPF_ST(X) + */ + case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_STB(_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_STH(_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); + break; + case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_STW(_R0, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ + EMIT(PPC_RAW_STW(src_reg_h, 
dst_reg, off)); + EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4)); + break; + case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4)); + PPC_EX32(_R0, imm); + EMIT(PPC_RAW_STW(_R0, dst_reg, off)); + break; + + /* + * BPF_STX ATOMIC (atomic ops) + */ + case BPF_STX | BPF_ATOMIC | BPF_W: + save_reg = _R0; + ret_reg = src_reg; + + bpf_set_seen_register(ctx, tmp_reg); + bpf_set_seen_register(ctx, ax_reg); + + /* Get offset into TMP_REG */ + EMIT(PPC_RAW_LI(tmp_reg, off)); + /* + * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' + * before and after the operation. + * + * This is a requirement in the Linux Kernel Memory Model. + * See __cmpxchg_u32() in asm/cmpxchg.h as an example. + */ + if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); + tmp_idx = ctx->idx * 4; + /* load value from memory into r0 */ + EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0)); + + /* Save old value in BPF_REG_AX */ + if (imm & BPF_FETCH) + EMIT(PPC_RAW_MR(ax_reg, _R0)); + + switch (imm) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + EMIT(PPC_RAW_ADD(_R0, _R0, src_reg)); + break; + case BPF_AND: + case BPF_AND | BPF_FETCH: + EMIT(PPC_RAW_AND(_R0, _R0, src_reg)); + break; + case BPF_OR: + case BPF_OR | BPF_FETCH: + EMIT(PPC_RAW_OR(_R0, _R0, src_reg)); + break; + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + EMIT(PPC_RAW_XOR(_R0, _R0, src_reg)); + break; + case BPF_CMPXCHG: + /* + * Return old value in BPF_REG_0 for BPF_CMPXCHG & + * in src_reg for other cases. + */ + ret_reg = bpf_to_ppc(BPF_REG_0); + + /* Compare with old value in BPF_REG_0 */ + EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0)); + /* Don't set if different from old value */ + PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4); + fallthrough; + case BPF_XCHG: + save_reg = src_reg; + break; + default: + pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n", + code, i); + return -EOPNOTSUPP; + } + + /* store new value */ + EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg)); + /* we're done if this succeeded */ + PPC_BCC_SHORT(COND_NE, tmp_idx); + + /* For the BPF_FETCH variant, get old data into src_reg */ + if (imm & BPF_FETCH) { + /* Emit 'sync' to enforce full ordering */ + if (IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); + EMIT(PPC_RAW_MR(ret_reg, ax_reg)); + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */ + } + break; + + case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */ + return -EOPNOTSUPP; + + /* + * BPF_LDX + */ + case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_B: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: + case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + /* + * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid + * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM + * load only if addr is kernel address (see is_kernel_addr()), otherwise + * set dst_reg=0 and move on. 
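The larx/stcx. retry loop emitted above has the following C-level shape for the BPF_ADD | BPF_FETCH case. This is a sketch only: load_reserved() and store_conditional() are invented stand-ins for lwarx/stwcx., and the barriers correspond to the 'sync' instructions emitted on SMP when BPF_FETCH is set:

    /* Conceptually, for *(u32 *)(dst + off) with BPF_ADD | BPF_FETCH: */
    u32 old, new;

    smp_mb();                                     /* leading sync             */
    do {
            old = load_reserved(dst + off);       /* lwarx                    */
            new = old + src;
    } while (!store_conditional(dst + off, new)); /* stwcx., retry on failure */
    smp_mb();                                     /* trailing sync            */
    src = old;                                    /* BPF_FETCH result         */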
+ */ + if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) { + PPC_LI32(_R0, TASK_SIZE - off); + EMIT(PPC_RAW_CMPLW(src_reg, _R0)); + PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4); + EMIT(PPC_RAW_LI(dst_reg, 0)); + /* + * For BPF_DW case, "li reg_h,0" would be needed when + * !fp->aux->verifier_zext. Emit NOP otherwise. + * + * Note that "li reg_h,0" is emitted for BPF_B/H/W case, + * if necessary. So, jump there instead of emitting an + * additional "li reg_h,0" instruction. + */ + if (size == BPF_DW && !fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + else + EMIT(PPC_RAW_NOP()); + /* + * Need to jump two instructions instead of one for BPF_DW case + * as there are two load instructions for dst_reg_h & dst_reg + * respectively. + */ + if (size == BPF_DW || + (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) + PPC_JMP((ctx->idx + 3) * 4); + else + PPC_JMP((ctx->idx + 2) * 4); + } + + if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); + break; + case BPF_H: + EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + } + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); + + } else { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off)); + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); + break; + } + if (size != BPF_DW && !fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } + + if (BPF_MODE(code) == BPF_PROBE_MEM) { + int insn_idx = ctx->idx - 1; + int jmp_off = 4; + + /* + * In case of BPF_DW, two lwz instructions are emitted, one + * for higher 32-bit and another for lower 32-bit. So, set + * ex->insn to the first of the two and jump over both + * instructions in fixup. + * + * Similarly, with !verifier_zext, two instructions are + * emitted for BPF_B/H/W case. So, set ex->insn to the + * instruction that could fault and skip over both + * instructions. + */ + if (size == BPF_DW || !fp->aux->verifier_zext) { + insn_idx -= 1; + jmp_off += 4; + } + + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx, + jmp_off, dst_reg, code); + if (ret) + return ret; + } + break; + + /* + * Doubleword load + * 16 byte instruction that uses two 'struct bpf_insn' + */ + case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ + PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); + PPC_LI32(dst_reg, (u32)insn[i].imm); + /* Adjust for two bpf instructions */ + addrs[++i] = ctx->idx * 4; + break; + + /* + * Return/Exit + */ + case BPF_JMP | BPF_EXIT: + /* + * If this isn't the very last instruction, branch to + * the epilogue. If we _are_ the last instruction, + * we'll just fall through to the epilogue. 
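Written out as plain C, the BPF_PROBE_MEM guard emitted above boils down to the following (a sketch of the intent; an actual fault on the kernel-address path is handled by the extable fixup added via bpf_add_extable_entry(), which zeroes the destination and resumes past the load):

    /* Only dereference (src + off) when it is a kernel address. */
    if ((u32)src > TASK_SIZE - off) {
            dst = *(u32 *)(src + off);    /* kernel pointer: do the load;
                                             a fault is fixed up via extable */
    } else {
            dst = 0;                      /* user address or NULL: skip it   */
    }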
+ */ + if (i != flen - 1) { + ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr); + if (ret) + return ret; + } + /* else fall through to the epilogue */ + break; + + /* + * Call kernel helper or bpf function + */ + case BPF_JMP | BPF_CALL: + ctx->seen |= SEEN_FUNC; + + ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + + if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) { + EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8)); + EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12)); + } + + ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); + if (ret) + return ret; + + EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3)); + EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4)); + break; + + /* + * Jumps and branches + */ + case BPF_JMP | BPF_JA: + PPC_JMP(addrs[i + 1 + off]); + break; + case BPF_JMP32 | BPF_JA: + PPC_JMP(addrs[i + 1 + imm]); + break; + + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: + true_cond = COND_GT; + goto cond_branch; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: + true_cond = COND_LT; + goto cond_branch; + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: + true_cond = COND_GE; + goto cond_branch; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: + true_cond = COND_LE; + goto cond_branch; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: + true_cond = COND_EQ; + goto cond_branch; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: + true_cond = COND_NE; + goto cond_branch; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: + true_cond = COND_NE; + /* fallthrough; */ + +cond_branch: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + /* unsigned comparison */ + EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + /* unsigned comparison */ + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | 
BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + /* signed comparison */ + EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + /* signed comparison */ + EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSET | BPF_X: + EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); + break; + case BPF_JMP32 | BPF_JSET | BPF_X: { + EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + /* + * Need sign-extended load, so only positive + * values can be used as imm in cmplwi + */ + if (imm >= 0 && imm < 32768) { + EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + /* sign-extending load ... but unsigned comparison */ + PPC_EX32(_R0, imm); + EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0)); + PPC_LI32(_R0, imm); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); + } + break; + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + if (imm >= 0 && imm < 65536) { + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); + } + break; + } + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (imm >= 0 && imm < 65536) { + EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + } else { + /* sign-extending load */ + EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? 
-1 : 0)); + PPC_LI32(_R0, imm); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_CMPLW(dst_reg, _R0)); + } + break; + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + /* + * signed comparison, so any 16-bit value + * can be used in cmpwi + */ + if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_CMPWI(dst_reg, imm)); + } else { + /* sign-extending load */ + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_CMPW(dst_reg, _R0)); + } + break; + case BPF_JMP | BPF_JSET | BPF_K: + /* andi does not sign-extend the immediate */ + if (imm >= 0 && imm < 32768) { + /* PPC_ANDI is _only/always_ dot-form */ + EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm)); + } else { + PPC_LI32(_R0, imm); + if (imm < 0) { + EMIT(PPC_RAW_CMPWI(dst_reg_h, 0)); + PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4); + } + EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0)); + } + break; + case BPF_JMP32 | BPF_JSET | BPF_K: + /* andi does not sign-extend the immediate */ + if (imm >= 0 && imm < 32768) { + /* PPC_ANDI is _only/always_ dot-form */ + EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm)); + } else { + PPC_LI32(_R0, imm); + EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0)); + } + break; + } + PPC_BCC(true_cond, addrs[i + 1 + off]); + break; + + /* + * Tail call + */ + case BPF_JMP | BPF_TAIL_CALL: + ctx->seen |= SEEN_TAILCALL; + ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + if (ret < 0) + return ret; + break; + + default: + /* + * The filter contains something cruel & unusual. + * We don't handle it, but also there shouldn't be + * anything missing from our list. + */ + pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i); + return -EOPNOTSUPP; + } + if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext && + !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64)) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } + + /* Set end-of-body-code address for exit. */ + addrs[i] = ctx->idx * 4; + + return 0; +} diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c new file mode 100644 index 000000000000..1fe37128c876 --- /dev/null +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -0,0 +1,1630 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * bpf_jit_comp64.c: eBPF JIT compiler + * + * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> + * IBM Corporation + * + * Based on the powerpc classic BPF JIT compiler by Matt Evans + */ +#include <linux/moduleloader.h> +#include <asm/cacheflush.h> +#include <asm/asm-compat.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include <linux/if_vlan.h> +#include <asm/kprobes.h> +#include <linux/bpf.h> +#include <asm/security_features.h> + +#include "bpf_jit.h" + +/* + * Stack layout: + * Ensure the top half (upto local_tmp_var) stays consistent + * with our redzone usage. 
+ * + * [ prev sp ] <------------- + * [ nv gpr save area ] 6*8 | + * [ tail_call_cnt ] 8 | + * [ local_tmp_var ] 24 | + * fp (r31) --> [ ebpf stack space ] upto 512 | + * [ frame header ] 32/112 | + * sp (r1) ---> [ stack pointer ] -------------- + */ + +/* for gpr non volatile registers BPG_REG_6 to 10 */ +#define BPF_PPC_STACK_SAVE (6*8) +/* for bpf JIT code internal usage */ +#define BPF_PPC_STACK_LOCALS 32 +/* stack frame excluding BPF stack, ensure this is quadword aligned */ +#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \ + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE) + +/* BPF register usage */ +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) +#define ARENA_VM_START (MAX_BPF_JIT_REG + 2) + +/* BPF to ppc register mappings */ +void bpf_jit_init_reg_mapping(struct codegen_context *ctx) +{ + /* function return value */ + ctx->b2p[BPF_REG_0] = _R8; + /* function arguments */ + ctx->b2p[BPF_REG_1] = _R3; + ctx->b2p[BPF_REG_2] = _R4; + ctx->b2p[BPF_REG_3] = _R5; + ctx->b2p[BPF_REG_4] = _R6; + ctx->b2p[BPF_REG_5] = _R7; + /* non volatile registers */ + ctx->b2p[BPF_REG_6] = _R27; + ctx->b2p[BPF_REG_7] = _R28; + ctx->b2p[BPF_REG_8] = _R29; + ctx->b2p[BPF_REG_9] = _R30; + /* frame pointer aka BPF_REG_10 */ + ctx->b2p[BPF_REG_FP] = _R31; + /* eBPF jit internal registers */ + ctx->b2p[BPF_REG_AX] = _R12; + ctx->b2p[TMP_REG_1] = _R9; + ctx->b2p[TMP_REG_2] = _R10; + /* non volatile register for kern_vm_start address */ + ctx->b2p[ARENA_VM_START] = _R26; +} + +/* PPC NVR range -- update this if we ever use NVRs below r26 */ +#define BPF_PPC_NVR_MIN _R26 + +static inline bool bpf_has_stack_frame(struct codegen_context *ctx) +{ + /* + * We only need a stack frame if: + * - we call other functions (kernel helpers), or + * - the bpf program uses its stack area + * The latter condition is deduced from the usage of BPF_REG_FP + */ + return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)); +} + +/* + * When not setting up our own stackframe, the redzone (288 bytes) usage is: + * + * [ prev sp ] <------------- + * [ ... ] | + * sp (r1) ---> [ stack pointer ] -------------- + * [ nv gpr save area ] 6*8 + * [ tail_call_cnt ] 8 + * [ local_tmp_var ] 24 + * [ unused red zone ] 224 + */ +static int bpf_jit_stack_local(struct codegen_context *ctx) +{ + if (bpf_has_stack_frame(ctx)) + return STACK_FRAME_MIN_SIZE + ctx->stack_size; + else + return -(BPF_PPC_STACK_SAVE + 32); +} + +static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx) +{ + return bpf_jit_stack_local(ctx) + 24; +} + +static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) +{ + if (reg >= BPF_PPC_NVR_MIN && reg < 32) + return (bpf_has_stack_frame(ctx) ? + (BPF_PPC_STACKFRAME + ctx->stack_size) : 0) + - (8 * (32 - reg)); + + pr_err("BPF JIT is asking about unknown registers"); + BUG(); +} + +void bpf_jit_realloc_regs(struct codegen_context *ctx) +{ +} + +void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* Instruction for trampoline attach */ + EMIT(PPC_RAW_NOP()); + +#ifndef CONFIG_PPC_KERNEL_PCREL + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))); +#endif + + /* + * Initialize tail_call_cnt if we do tail calls. + * Otherwise, put in NOPs so that it can be skipped when we are + * invoked through a tail call. 
+ */ + if (ctx->seen & SEEN_TAILCALL) { + EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0)); + /* this goes in the redzone */ + EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8))); + } else { + EMIT(PPC_RAW_NOP()); + EMIT(PPC_RAW_NOP()); + } + + if (bpf_has_stack_frame(ctx)) { + /* + * We need a stack frame, but we don't necessarily need to + * save/restore LR unless we call other functions + */ + if (ctx->seen & SEEN_FUNC) { + EMIT(PPC_RAW_MFLR(_R0)); + EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)); + } + + EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size))); + } + + /* + * Back up non-volatile regs -- BPF registers 6-10 + * If we haven't created our own stack frame, we save these + * in the protected zone below the previous stack frame + */ + for (i = BPF_REG_6; i <= BPF_REG_10; i++) + if (bpf_is_seen_register(ctx, bpf_to_ppc(i))) + EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i)))); + + if (ctx->arena_vm_start) + EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1, + bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START)))); + + /* Setup frame pointer to point to the bpf stack area */ + if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) + EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1, + STACK_FRAME_MIN_SIZE + ctx->stack_size)); + + if (ctx->arena_vm_start) + PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start); +} + +static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx) +{ + int i; + + /* Restore NVRs */ + for (i = BPF_REG_6; i <= BPF_REG_10; i++) + if (bpf_is_seen_register(ctx, bpf_to_ppc(i))) + EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i)))); + + if (ctx->arena_vm_start) + EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1, + bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START)))); + + /* Tear down our stack frame */ + if (bpf_has_stack_frame(ctx)) { + EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size)); + if (ctx->seen & SEEN_FUNC) { + EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTLR(_R0)); + } + } +} + +void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +{ + bpf_jit_emit_common_epilogue(image, ctx); + + /* Move result to r3 */ + EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0))); + + EMIT(PPC_RAW_BLR()); + + bpf_jit_build_fentry_stubs(image, ctx); +} + +int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) +{ + unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0; + long reladdr; + + /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */ + if (!func) { + for (int i = 0; i < 5; i++) + EMIT(PPC_RAW_NOP()); + /* elfv1 needs an additional instruction to load addr from descriptor */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) + EMIT(PPC_RAW_NOP()); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); + return 0; + } + +#ifdef CONFIG_PPC_KERNEL_PCREL + reladdr = func_addr - local_paca->kernelbase; + + /* + * If fimage is NULL (the initial pass to find image size), + * account for the maximum no. of instructions possible. 
+ */ + if (!fimage) { + ctx->idx += 7; + return 0; + } else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { + EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase))); + /* Align for subsequent prefix instruction */ + if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8)) + EMIT(PPC_RAW_NOP()); + /* paddi r12,r12,addr */ + EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr)); + EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr)); + } else { + unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx); + bool alignment_needed = !IS_ALIGNED(pc, 8); + + reladdr = func_addr - (alignment_needed ? pc + 4 : pc); + + if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { + if (alignment_needed) + EMIT(PPC_RAW_NOP()); + /* pla r12,addr */ + EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr)); + EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr)); + } else { + /* We can clobber r12 */ + PPC_LI64(_R12, func); + } + } + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); +#else + if (core_kernel_text(func_addr)) { + reladdr = func_addr - kernel_toc_addr(); + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func); + return -ERANGE; + } + + EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr))); + EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr))); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); + } else { + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) { + /* func points to the function descriptor */ + PPC_LI64(bpf_to_ppc(TMP_REG_2), func); + /* Load actual entry point from function descriptor */ + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0)); + /* ... and move it to CTR */ + EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1))); + /* + * Load TOC from function descriptor at offset 8. + * We can clobber r2 since we get called through a + * function pointer (so caller will save/restore r2). + */ + if (is_module_text_address(func_addr)) + EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8)); + } else { + PPC_LI64(_R12, func); + EMIT(PPC_RAW_MTCTR(_R12)); + } + EMIT(PPC_RAW_BCTRL()); + /* + * Load r2 with kernel TOC as kernel TOC is used if function address falls + * within core kernel text. 
+ */ + if (is_module_text_address(func_addr)) + EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))); + } +#endif + + return 0; +} + +static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) +{ + /* + * By now, the eBPF program has already setup parameters in r3, r4 and r5 + * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program + * r4/BPF_REG_2 - pointer to bpf_array + * r5/BPF_REG_3 - index in bpf_array + */ + int b2p_bpf_array = bpf_to_ppc(BPF_REG_2); + int b2p_index = bpf_to_ppc(BPF_REG_3); + int bpf_tailcall_prologue_size = 12; + + if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + bpf_tailcall_prologue_size += 4; /* skip past the toc load */ + + /* + * if (index >= array->map.max_entries) + * goto out; + */ + EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries))); + EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31)); + EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1))); + PPC_BCC_SHORT(COND_GE, out); + + /* + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) + * goto out; + */ + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx))); + EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT)); + PPC_BCC_SHORT(COND_GE, out); + + /* + * tail_call_cnt++; + */ + EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1)); + EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx))); + + /* prog = array->ptrs[index]; */ + EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8)); + EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array)); + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs))); + + /* + * if (prog == NULL) + * goto out; + */ + EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0)); + PPC_BCC_SHORT(COND_EQ, out); + + /* goto *(prog->bpf_func + prologue_size); */ + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func))); + EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), + FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size)); + EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1))); + + /* tear down stack, restore NVRs, ... */ + bpf_jit_emit_common_epilogue(image, ctx); + + EMIT(PPC_RAW_BCTR()); + + /* out: */ + return 0; +} + +bool bpf_jit_bypass_spec_v1(void) +{ +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) + return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)); +#else + return true; +#endif +} + +bool bpf_jit_bypass_spec_v4(void) +{ + return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_STF_BARRIER) && + stf_barrier_type_get() != STF_BARRIER_NONE); +} + +/* + * We spill into the redzone always, even if the bpf program has its own stackframe. 
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local() + */ +void bpf_stf_barrier(void); + +asm ( +" .global bpf_stf_barrier ;" +" bpf_stf_barrier: ;" +" std 21,-80(1) ;" +" std 22,-72(1) ;" +" sync ;" +" ld 21,-80(1) ;" +" ld 22,-72(1) ;" +" ori 31,31,0 ;" +" .rept 14 ;" +" b 1f ;" +" 1: ;" +" .endr ;" +" blr ;" +); + +static int bpf_jit_emit_atomic_ops(u32 *image, struct codegen_context *ctx, + const struct bpf_insn *insn, u32 *jmp_off, + u32 *tmp_idx, u32 *addrp) +{ + u32 tmp1_reg = bpf_to_ppc(TMP_REG_1); + u32 tmp2_reg = bpf_to_ppc(TMP_REG_2); + u32 size = BPF_SIZE(insn->code); + u32 src_reg = bpf_to_ppc(insn->src_reg); + u32 dst_reg = bpf_to_ppc(insn->dst_reg); + s32 imm = insn->imm; + + u32 save_reg = tmp2_reg; + u32 ret_reg = src_reg; + u32 fixup_idx; + + /* Get offset into TMP_REG_1 */ + EMIT(PPC_RAW_LI(tmp1_reg, insn->off)); + /* + * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' + * before and after the operation. + * + * This is a requirement in the Linux Kernel Memory Model. + * See __cmpxchg_u64() in asm/cmpxchg.h as an example. + */ + if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); + + *tmp_idx = ctx->idx; + + /* load value from memory into TMP_REG_2 */ + if (size == BPF_DW) + EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0)); + else + EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0)); + /* Save old value in _R0 */ + if (imm & BPF_FETCH) + EMIT(PPC_RAW_MR(_R0, tmp2_reg)); + + switch (imm) { + case BPF_ADD: + case BPF_ADD | BPF_FETCH: + EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg)); + break; + case BPF_AND: + case BPF_AND | BPF_FETCH: + EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg)); + break; + case BPF_OR: + case BPF_OR | BPF_FETCH: + EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg)); + break; + case BPF_XOR: + case BPF_XOR | BPF_FETCH: + EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg)); + break; + case BPF_CMPXCHG: + /* + * Return old value in BPF_REG_0 for BPF_CMPXCHG & + * in src_reg for other cases. + */ + ret_reg = bpf_to_ppc(BPF_REG_0); + + /* Compare with old value in BPF_R0 */ + if (size == BPF_DW) + EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg)); + else + EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg)); + /* Don't set if different from old value */ + PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4); + fallthrough; + case BPF_XCHG: + save_reg = src_reg; + break; + default: + return -EOPNOTSUPP; + } + + /* store new value */ + if (size == BPF_DW) + EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg)); + else + EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg)); + /* we're done if this succeeded */ + PPC_BCC_SHORT(COND_NE, *tmp_idx * 4); + fixup_idx = ctx->idx; + + if (imm & BPF_FETCH) { + /* Emit 'sync' to enforce full ordering */ + if (IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); + EMIT(PPC_RAW_MR(ret_reg, _R0)); + /* + * Skip unnecessary zero-extension for 32-bit cmpxchg. + * For context, see commit 39491867ace5. 
+ */ + if (size != BPF_DW && imm == BPF_CMPXCHG && + insn_is_zext(insn + 1)) + *addrp = ctx->idx * 4; + } + + *jmp_off = (fixup_idx - *tmp_idx) * 4; + + return 0; +} + +static int bpf_jit_emit_probe_mem_store(struct codegen_context *ctx, u32 src_reg, s16 off, + u32 code, u32 *image) +{ + u32 tmp1_reg = bpf_to_ppc(TMP_REG_1); + u32 tmp2_reg = bpf_to_ppc(TMP_REG_2); + + switch (BPF_SIZE(code)) { + case BPF_B: + EMIT(PPC_RAW_STB(src_reg, tmp1_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_STH(src_reg, tmp1_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_STW(src_reg, tmp1_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp2_reg, off)); + EMIT(PPC_RAW_STDX(src_reg, tmp1_reg, tmp2_reg)); + } else { + EMIT(PPC_RAW_STD(src_reg, tmp1_reg, off)); + } + break; + default: + return -EINVAL; + } + return 0; +} + +static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image) +{ + u32 code = insn.code; + u32 dst_reg = bpf_to_ppc(insn.dst_reg); + u32 src_reg = bpf_to_ppc(insn.src_reg); + u32 size = BPF_SIZE(code); + u32 tmp1_reg = bpf_to_ppc(TMP_REG_1); + u32 tmp2_reg = bpf_to_ppc(TMP_REG_2); + s16 off = insn.off; + s32 imm = insn.imm; + + switch (imm) { + case BPF_LOAD_ACQ: + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp1_reg, off)); + EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg)); + } else { + EMIT(PPC_RAW_LD(dst_reg, src_reg, off)); + } + break; + } + EMIT(PPC_RAW_LWSYNC()); + break; + case BPF_STORE_REL: + EMIT(PPC_RAW_LWSYNC()); + switch (size) { + case BPF_B: + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp2_reg, off)); + EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg)); + } else { + EMIT(PPC_RAW_STD(src_reg, dst_reg, off)); + } + break; + } + break; + default: + pr_err_ratelimited("unexpected atomic load/store op code %02x\n", + imm); + return -EINVAL; + } + + return 0; +} + +/* Assemble the body code between the prologue & epilogue */ +int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, + u32 *addrs, int pass, bool extra_pass) +{ + enum stf_barrier_type stf_barrier = stf_barrier_type_get(); + bool sync_emitted, ori31_emitted; + const struct bpf_insn *insn = fp->insnsi; + int flen = fp->len; + int i, ret; + + /* Start of epilogue code - will only be valid 2nd pass onwards */ + u32 exit_addr = addrs[flen]; + + for (i = 0; i < flen; i++) { + u32 code = insn[i].code; + u32 dst_reg = bpf_to_ppc(insn[i].dst_reg); + u32 src_reg = bpf_to_ppc(insn[i].src_reg); + u32 size = BPF_SIZE(code); + u32 tmp1_reg = bpf_to_ppc(TMP_REG_1); + u32 tmp2_reg = bpf_to_ppc(TMP_REG_2); + s16 off = insn[i].off; + s32 imm = insn[i].imm; + bool func_addr_fixed; + u64 func_addr; + u64 imm64; + u32 true_cond; + u32 tmp_idx; + u32 jmp_off; + + /* + * addrs[] maps a BPF bytecode address into a real offset from + * the start of the body code. + */ + addrs[i] = ctx->idx * 4; + + /* + * As an optimization, we note down which non-volatile registers + * are used so that we can only save/restore those in our + * prologue and epilogue. 
We do this here regardless of whether + * the actual BPF instruction uses src/dst registers or not + * (for instance, BPF_CALL does not use them). The expectation + * is that those instructions will have src_reg/dst_reg set to + * 0. Even otherwise, we just lose some prologue/epilogue + * optimization but everything else should work without + * any issues. + */ + if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32) + bpf_set_seen_register(ctx, dst_reg); + if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32) + bpf_set_seen_register(ctx, src_reg); + + switch (code) { + /* + * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG + */ + case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */ + case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */ + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ + case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */ + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ + case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */ + if (!imm) { + goto bpf_alu32_trunc; + } else if (imm >= -32768 && imm < 32768) { + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm))); + } else { + PPC_LI32(tmp1_reg, imm); + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg)); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ + case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */ + if (!imm) { + goto bpf_alu32_trunc; + } else if (imm > -32768 && imm <= 32768) { + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm))); + } else { + PPC_LI32(tmp1_reg, imm); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ + case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */ + if (BPF_CLASS(code) == BPF_ALU) + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */ + case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */ + if (imm >= -32768 && imm < 32768) + EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm))); + else { + PPC_LI32(tmp1_reg, imm); + if (BPF_CLASS(code) == BPF_ALU) + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg)); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ + case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ + if (BPF_OP(code) == BPF_MOD) { + if (off) + EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg)); + + EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); + } else + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ + case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ + if (BPF_OP(code) == BPF_MOD) { + if (off) + EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); + } else + if (off) + EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ + case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst 
/= (u32) imm */ + case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */ + case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ + if (imm == 0) + return -EINVAL; + if (imm == 1) { + if (BPF_OP(code) == BPF_DIV) { + goto bpf_alu32_trunc; + } else { + EMIT(PPC_RAW_LI(dst_reg, 0)); + break; + } + } + + PPC_LI32(tmp1_reg, imm); + switch (BPF_CLASS(code)) { + case BPF_ALU: + if (BPF_OP(code) == BPF_MOD) { + if (off) + EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg)); + EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); + } else + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg)); + break; + case BPF_ALU64: + if (BPF_OP(code) == BPF_MOD) { + if (off) + EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg)); + EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg)); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); + } else + if (off) + EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg)); + break; + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */ + case BPF_ALU64 | BPF_NEG: /* dst = -dst */ + EMIT(PPC_RAW_NEG(dst_reg, dst_reg)); + goto bpf_alu32_trunc; + + /* + * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH + */ + case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */ + case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ + EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */ + case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ + if (!IMM_H(imm)) + EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm))); + else { + /* Sign-extended */ + PPC_LI32(tmp1_reg, imm); + EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg)); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ + case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ + EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */ + case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */ + if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) { + /* Sign-extended */ + PPC_LI32(tmp1_reg, imm); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg)); + } else { + if (IMM_L(imm)) + EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm))); + if (IMM_H(imm)) + EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm))); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */ + case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */ + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */ + case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */ + if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) { + /* Sign-extended */ + PPC_LI32(tmp1_reg, imm); + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg)); + } else { + if (IMM_L(imm)) + EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm))); + if (IMM_H(imm)) + EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm))); + } + goto bpf_alu32_trunc; + case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ + /* slw clears top 32 bits */ + EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg)); + /* skip zero extension move, but set address map. 
*/ + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ + EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */ + /* with imm 0, we still need to clear top 32 bits */ + EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm)); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */ + if (imm != 0) + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm)); + break; + case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ + EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg)); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */ + EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ + EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm)); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ + if (imm != 0) + EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm)); + break; + case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ + EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ + EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg)); + break; + case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ + EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm)); + goto bpf_alu32_trunc; + case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ + if (imm != 0) + EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm)); + break; + + /* + * MOV + */ + case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ + case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ + + if (insn_is_cast_user(&insn[i])) { + EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32)); + PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL)); + PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4); + EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg)); + EMIT(PPC_RAW_MR(dst_reg, tmp1_reg)); + break; + } + + if (imm == 1) { + /* special mov32 for zext */ + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); + break; + } else if (off == 8) { + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + } else if (off == 16) { + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); + } else if (off == 32) { + EMIT(PPC_RAW_EXTSW(dst_reg, src_reg)); + } else if (dst_reg != src_reg) + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + goto bpf_alu32_trunc; + case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ + case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ + PPC_LI32(dst_reg, imm); + if (imm < 0) + goto bpf_alu32_trunc; + else if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + +bpf_alu32_trunc: + /* Truncate to 32-bits */ + if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext) + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); + break; + + /* + * BPF_FROM_BE/LE + */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: +#ifdef __BIG_ENDIAN__ + if (BPF_SRC(code) == BPF_FROM_BE) + goto emit_clear; +#else /* !__BIG_ENDIAN__ */ + if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE) + goto emit_clear; +#endif + switch (imm) { + case 16: + /* Rotate 8 bits left & mask with 0x0000ff00 */ + EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23)); + /* Rotate 8 bits right & insert LSB to reg */ + EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31)); + /* Move result back to dst_reg */ + 
EMIT(PPC_RAW_MR(dst_reg, tmp1_reg)); + break; + case 32: + /* + * Rotate word left by 8 bits: + * 2 bytes are already in their final position + * -- byte 2 and 4 (of bytes 1, 2, 3 and 4) + */ + EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31)); + /* Rotate 24 bits and insert byte 1 */ + EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7)); + /* Rotate 24 bits and insert byte 3 */ + EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, tmp1_reg)); + break; + case 64: + /* Store the value to stack and then use byte-reverse loads */ + EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx))); + EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx))); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg)); + } else { + EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg)); + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32)); + EMIT(PPC_RAW_LI(tmp2_reg, 4)); + EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg)); + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg)); + } + break; + } + break; + +emit_clear: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48)); + if (insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + case 32: + if (!fp->aux->verifier_zext) + /* zero-extend 32 bits into 64 bits */ + EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32)); + break; + case 64: + /* nop */ + break; + } + break; + + /* + * BPF_ST NOSPEC (speculation barrier) + * + * The following must act as a barrier against both Spectre v1 + * and v4 if we requested both mitigations. Therefore, also emit + * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to + * the insns needed for a Spectre v4 barrier. + * + * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4, + * we can skip the respective other barrier type as an + * optimization. 
+ */ + case BPF_ST | BPF_NOSPEC: + sync_emitted = false; + ori31_emitted = false; + if (IS_ENABLED(CONFIG_PPC_E500) && + !bpf_jit_bypass_spec_v1()) { + EMIT(PPC_RAW_ISYNC()); + EMIT(PPC_RAW_SYNC()); + sync_emitted = true; + } + if (!bpf_jit_bypass_spec_v4()) { + switch (stf_barrier) { + case STF_BARRIER_EIEIO: + EMIT(PPC_RAW_EIEIO() | 0x02000000); + break; + case STF_BARRIER_SYNC_ORI: + if (!sync_emitted) + EMIT(PPC_RAW_SYNC()); + EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0)); + EMIT(PPC_RAW_ORI(_R31, _R31, 0)); + ori31_emitted = true; + break; + case STF_BARRIER_FALLBACK: + ctx->seen |= SEEN_FUNC; + PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier)); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); + break; + case STF_BARRIER_NONE: + break; + } + } + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && + !bpf_jit_bypass_spec_v1() && + !ori31_emitted) + EMIT(PPC_RAW_ORI(_R31, _R31, 0)); + break; + + /* + * BPF_ST(X) + */ + case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ + case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ + if (BPF_CLASS(code) == BPF_ST) { + EMIT(PPC_RAW_LI(tmp1_reg, imm)); + src_reg = tmp1_reg; + } + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ + case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ + if (BPF_CLASS(code) == BPF_ST) { + EMIT(PPC_RAW_LI(tmp1_reg, imm)); + src_reg = tmp1_reg; + } + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ + case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ + if (BPF_CLASS(code) == BPF_ST) { + PPC_LI32(tmp1_reg, imm); + src_reg = tmp1_reg; + } + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); + break; + case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ + case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ + if (BPF_CLASS(code) == BPF_ST) { + PPC_LI32(tmp1_reg, imm); + src_reg = tmp1_reg; + } + if (off % 4) { + EMIT(PPC_RAW_LI(tmp2_reg, off)); + EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg)); + } else { + EMIT(PPC_RAW_STD(src_reg, dst_reg, off)); + } + break; + + case BPF_STX | BPF_PROBE_MEM32 | BPF_B: + case BPF_STX | BPF_PROBE_MEM32 | BPF_H: + case BPF_STX | BPF_PROBE_MEM32 | BPF_W: + case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: + + EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START))); + + ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image); + if (ret) + return ret; + + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, + ctx->idx - 1, 4, -1, code); + if (ret) + return ret; + + break; + + case BPF_ST | BPF_PROBE_MEM32 | BPF_B: + case BPF_ST | BPF_PROBE_MEM32 | BPF_H: + case BPF_ST | BPF_PROBE_MEM32 | BPF_W: + case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: + + EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START))); + + if (BPF_SIZE(code) == BPF_W || BPF_SIZE(code) == BPF_DW) { + PPC_LI32(tmp2_reg, imm); + src_reg = tmp2_reg; + } else { + EMIT(PPC_RAW_LI(tmp2_reg, imm)); + src_reg = tmp2_reg; + } + + ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image); + if (ret) + return ret; + + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, + ctx->idx - 1, 4, -1, code); + if (ret) + return ret; + + break; + + /* + * BPF_STX PROBE_ATOMIC (arena atomic ops) + */ + case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START))); + ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i], + &jmp_off, &tmp_idx, 
&addrs[i + 1]); + if (ret) { + if (ret == -EOPNOTSUPP) { + pr_err_ratelimited( + "eBPF filter atomic op code %02x (@%d) unsupported\n", + code, i); + } + return ret; + } + /* LDARX/LWARX should land here on exception. */ + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, + tmp_idx, jmp_off, dst_reg, code); + if (ret) + return ret; + + /* Retrieve the dst_reg */ + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START))); + break; + + /* + * BPF_STX ATOMIC (atomic ops) + */ + case BPF_STX | BPF_ATOMIC | BPF_B: + case BPF_STX | BPF_ATOMIC | BPF_H: + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + if (bpf_atomic_is_load_store(&insn[i])) { + ret = emit_atomic_ld_st(insn[i], ctx, image); + if (ret) + return ret; + + if (size != BPF_DW && insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + } else if (size == BPF_B || size == BPF_H) { + pr_err_ratelimited( + "eBPF filter atomic op code %02x (@%d) unsupported\n", + code, i); + return -EOPNOTSUPP; + } + + ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i], + &jmp_off, &tmp_idx, &addrs[i + 1]); + if (ret) { + if (ret == -EOPNOTSUPP) { + pr_err_ratelimited( + "eBPF filter atomic op code %02x (@%d) unsupported\n", + code, i); + } + return ret; + } + break; + + /* + * BPF_LDX + */ + /* dst = *(u8 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEMSX | BPF_B: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: + /* dst = *(u16 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEMSX | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: + /* dst = *(u32 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEMSX | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: + /* dst = *(u64 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + /* + * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid + * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM + * load only if addr is kernel address (see is_kernel_addr()), otherwise + * set dst_reg=0 and move on. + */ + if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) { + EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off)); + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) + PPC_LI64(tmp2_reg, 0x8000000000000000ul); + else /* BOOK3S_64 */ + PPC_LI64(tmp2_reg, PAGE_OFFSET); + EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg)); + PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4); + EMIT(PPC_RAW_LI(dst_reg, 0)); + /* + * Check if 'off' is word aligned for BPF_DW, because + * we might generate two instructions. 
+ */ + if ((BPF_SIZE(code) == BPF_DW && (off & 3)) || + (BPF_SIZE(code) == BPF_B && + BPF_MODE(code) == BPF_PROBE_MEMSX) || + (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_MEMSX)) + PPC_JMP((ctx->idx + 3) * 4); + else + PPC_JMP((ctx->idx + 2) * 4); + } + + if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); + break; + case BPF_H: + EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWA(dst_reg, src_reg, off)); + break; + } + } else { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp1_reg, off)); + EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg)); + } else { + EMIT(PPC_RAW_LD(dst_reg, src_reg, off)); + } + break; + } + } + + if (size != BPF_DW && insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + + if (BPF_MODE(code) == BPF_PROBE_MEM) { + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, + ctx->idx - 1, 4, dst_reg, code); + if (ret) + return ret; + } + break; + + /* dst = *(u64 *)(ul) (src + ARENA_VM_START + off) */ + case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: + + EMIT(PPC_RAW_ADD(tmp1_reg, src_reg, bpf_to_ppc(ARENA_VM_START))); + + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, tmp1_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, tmp1_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, tmp1_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp2_reg, off)); + EMIT(PPC_RAW_LDX(dst_reg, tmp1_reg, tmp2_reg)); + } else { + EMIT(PPC_RAW_LD(dst_reg, tmp1_reg, off)); + } + break; + } + + if (size != BPF_DW && insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + + ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, + ctx->idx - 1, 4, dst_reg, code); + if (ret) + return ret; + break; + + /* + * Doubleword load + * 16 byte instruction that uses two 'struct bpf_insn' + */ + case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ + imm64 = ((u64)(u32) insn[i].imm) | + (((u64)(u32) insn[i+1].imm) << 32); + PPC_LI64(dst_reg, imm64); + /* Adjust for two bpf instructions */ + addrs[++i] = ctx->idx * 4; + break; + + /* + * Return/Exit + */ + case BPF_JMP | BPF_EXIT: + /* + * If this isn't the very last instruction, branch to + * the epilogue. If we _are_ the last instruction, + * we'll just fall through to the epilogue. 
+ */ + if (i != flen - 1) { + ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr); + if (ret) + return ret; + } + /* else fall through to the epilogue */ + break; + + /* + * Call kernel helper or bpf function + */ + case BPF_JMP | BPF_CALL: + ctx->seen |= SEEN_FUNC; + + ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + + ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); + if (ret) + return ret; + + /* move return value from r3 to BPF_REG_0 */ + EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3)); + break; + + /* + * Jumps and branches + */ + case BPF_JMP | BPF_JA: + PPC_JMP(addrs[i + 1 + off]); + break; + case BPF_JMP32 | BPF_JA: + PPC_JMP(addrs[i + 1 + imm]); + break; + + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: + true_cond = COND_GT; + goto cond_branch; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: + true_cond = COND_LT; + goto cond_branch; + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: + true_cond = COND_GE; + goto cond_branch; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: + true_cond = COND_LE; + goto cond_branch; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: + true_cond = COND_EQ; + goto cond_branch; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: + true_cond = COND_NE; + goto cond_branch; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: + true_cond = COND_NE; + /* Fall through */ + +cond_branch: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + /* unsigned comparison */ + if (BPF_CLASS(code) == BPF_JMP32) + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); + else + EMIT(PPC_RAW_CMPLD(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + /* signed comparison 
*/ + if (BPF_CLASS(code) == BPF_JMP32) + EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); + else + EMIT(PPC_RAW_CMPD(dst_reg, src_reg)); + break; + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + if (BPF_CLASS(code) == BPF_JMP) { + EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg)); + } else { + EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31)); + } + break; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + + /* + * Need sign-extended load, so only positive + * values can be used as imm in cmpldi + */ + if (imm >= 0 && imm < 32768) { + if (is_jmp32) + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); + else + EMIT(PPC_RAW_CMPLDI(dst_reg, imm)); + } else { + /* sign-extending load */ + PPC_LI32(tmp1_reg, imm); + /* ... but unsigned comparison */ + if (is_jmp32) + EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg)); + } + break; + } + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + + /* + * signed comparison, so any 16-bit value + * can be used in cmpdi + */ + if (imm >= -32768 && imm < 32768) { + if (is_jmp32) + EMIT(PPC_RAW_CMPWI(dst_reg, imm)); + else + EMIT(PPC_RAW_CMPDI(dst_reg, imm)); + } else { + PPC_LI32(tmp1_reg, imm); + if (is_jmp32) + EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg)); + } + break; + } + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + /* andi does not sign-extend the immediate */ + if (imm >= 0 && imm < 32768) + /* PPC_ANDI is _only/always_ dot-form */ + EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm)); + else { + PPC_LI32(tmp1_reg, imm); + if (BPF_CLASS(code) == BPF_JMP) { + EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, + tmp1_reg)); + } else { + EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg)); + EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, + 0, 0, 31)); + } + } + break; + } + PPC_BCC(true_cond, addrs[i + 1 + off]); + break; + + /* + * Tail call + */ + case BPF_JMP | BPF_TAIL_CALL: + ctx->seen |= SEEN_TAILCALL; + ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); + if (ret < 0) + return ret; + break; + + default: + /* + * The filter contains something cruel & unusual. + * We don't handle it, but also there shouldn't be + * anything missing from our list. + */ + pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", + code, i); + return -ENOTSUPP; + } + } + + /* Set end-of-body-code address for exit. */ + addrs[i] = ctx->idx * 4; + + return 0; +} |
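
Throughout bpf_jit_build_body() above, an immediate is folded into a single D-form instruction (addi, mulli, cmpdi, cmpwi, and the unsigned cmpldi/cmplwi) only when it fits the instruction's 16-bit immediate field (and, for the unsigned compares, is non-negative); otherwise the constant is first materialised into a temporary register via PPC_LI32, which expands to a single li or to lis plus an optional ori. The standalone C sketch below is illustrative only and not part of the patch: the register number and sample immediates are arbitrary, and the tiny encoders stand in for the PPC_RAW_* macros. It reproduces the PPC_LI32 fallback to show where the one-vs-two instruction split falls.

#include <stdint.h>
#include <stdio.h>

/* D-form PowerPC encodings: addi is primary opcode 14, addis 15, ori 24 */
static uint32_t ppc_addi(int rt, int ra, uint32_t simm)
{
	return (14u << 26) | ((uint32_t)rt << 21) | ((uint32_t)ra << 16) | (simm & 0xffff);
}
static uint32_t ppc_addis(int rt, int ra, uint32_t simm)
{
	return (15u << 26) | ((uint32_t)rt << 21) | ((uint32_t)ra << 16) | (simm & 0xffff);
}
static uint32_t ppc_ori(int ra, int rs, uint32_t uimm)
{
	return (24u << 26) | ((uint32_t)rs << 21) | ((uint32_t)ra << 16) | (uimm & 0xffff);
}

/* Mirror of the PPC_LI32 strategy: one 'li' when the value fits the
 * signed 16-bit immediate field, else 'lis' plus an optional 'ori'. */
static int li32(uint32_t *buf, int reg, int32_t imm)
{
	int n = 0;

	if (imm >= -32768 && imm < 32768) {
		buf[n++] = ppc_addi(reg, 0, (uint32_t)imm);		/* li  reg,imm    */
	} else {
		buf[n++] = ppc_addis(reg, 0, (uint32_t)imm >> 16);	/* lis reg,hi16   */
		if ((uint32_t)imm & 0xffff)
			buf[n++] = ppc_ori(reg, reg, (uint32_t)imm);	/* ori reg,reg,lo */
	}
	return n;
}

int main(void)
{
	int32_t samples[] = { 1, -1, 32767, 32768, 0x10000, -40000 };
	uint32_t buf[2];

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int n = li32(buf, 9 /* r9, i.e. TMP_REG_1 above */, samples[i]);

		printf("imm %8d -> %d insn(s), first word 0x%08x\n",
		       samples[i], n, buf[0]);
	}
	return 0;
}

The 32767/32768 pair shows the crossover guarded by the "imm >= -32768 && imm < 32768" tests above, and 0x10000 shows the lis-only case where the low halfword is zero.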

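
The prologue/epilogue code above hinges on a little offset arithmetic: with a stack frame, the JIT scratch area sits just above the frame header at STACK_FRAME_MIN_SIZE + ctx->stack_size; without one, the same 80 bytes (6*8 GPR save area, 8-byte tail_call_cnt, 24-byte local_tmp_var) live in the caller's redzone below r1. The short program below is a sketch, not kernel code: STACK_FRAME_MIN_SIZE is hard-coded to the 32-byte ELFv2 frame header from the "frame header 32/112" layout comment, and the 64-byte BPF stack size is an arbitrary example. It reproduces the bpf_jit_stack_local()/bpf_jit_stack_tailcallcnt() arithmetic for both cases.

#include <stdbool.h>
#include <stdio.h>

#define BPF_PPC_STACK_SAVE	(6 * 8)	/* r26-r31 save area, as above          */
#define STACK_FRAME_MIN_SIZE	32	/* assumed ELFv2 frame header (32/112)  */

/* Counterpart of bpf_jit_stack_local(): scratch area above the frame
 * header when we own a frame, otherwise in the caller's redzone. */
static int stack_local(bool has_frame, int bpf_stack_size)
{
	return has_frame ? STACK_FRAME_MIN_SIZE + bpf_stack_size
			 : -(BPF_PPC_STACK_SAVE + 32);
}

static int stack_tailcallcnt(bool has_frame, int bpf_stack_size)
{
	return stack_local(has_frame, bpf_stack_size) + 24;
}

int main(void)
{
	int bpf_stack_size = 64;	/* example program stack size */

	printf("with frame:    local_tmp_var r1%+d, tail_call_cnt r1%+d\n",
	       stack_local(true, bpf_stack_size),
	       stack_tailcallcnt(true, bpf_stack_size));
	printf("without frame: local_tmp_var r1%+d, tail_call_cnt r1%+d\n",
	       stack_local(false, bpf_stack_size),
	       stack_tailcallcnt(false, bpf_stack_size));
	return 0;
}

The no-frame numbers line up with the rest of the patch: -56 is the -(BPF_PPC_STACK_SAVE + 8) slot the prologue initialises for tail_call_cnt, and -80/-72 are the two redzone slots bpf_stf_barrier spills r21/r22 into.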