author     Saket Kumar Bhaskar <skb99@linux.ibm.com>    2025-09-04 15:38:33 +0530
committer  Madhavan Srinivasan <maddy@linux.ibm.com>    2025-09-06 15:49:43 +0530
commit     a2485d06cad3741303b9414e44f9b6d76d3713f2 (patch)
tree       54c7ae4a9d983509da46fc3969ef27181482b5f3
parent     47c7f3b7038787ee42cb9ed69825e6790dba9410 (diff)
powerpc64/bpf: Implement bpf_addr_space_cast instruction
LLVM generates the bpf_addr_space_cast instruction while translating pointers between the native (zero) address space and __attribute__((address_space(N))). The addr_space=1 is reserved as the bpf_arena address space.

rY = addr_space_cast(rX, 0, 1) is processed by the verifier and converted to a normal 32-bit move: wY = wX.

rY = addr_space_cast(rX, 1, 0) is used to convert a bpf arena pointer to a pointer in the userspace vma. This has to be converted by the JIT.

PPC_RAW_RLDICL_DOT, a variant of PPC_RAW_RLDICL, is introduced to also set the condition register.

Reviewed-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Signed-off-by: Saket Kumar Bhaskar <skb99@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250904100835.1100423-3-skb99@linux.ibm.com
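For reference, a minimal, untested sketch of the kind of bpf_arena program for which LLVM emits these casts. The __arena macro, the map layout and the map name "arena" follow the usual BPF selftest conventions and are not part of this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Arena pointers live in LLVM address space 1; moving them to/from the
 * native (zero) address space is what generates the bpf_addr_space_cast
 * instructions handled by the verifier and, with this patch, the
 * powerpc64 JIT. */
#define __arena __attribute__((address_space(1)))

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10);	/* number of arena pages */
} arena SEC(".maps");

int __arena *counter;			/* pointer into the arena */

SEC("syscall")
int touch_counter(void *ctx)
{
	/* Dereferencing an __arena pointer involves an address space
	 * cast from the arena to the native address space. */
	if (counter)
		*counter += 1;
	return 0;
}

char _license[] SEC("license") = "GPL";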
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h   1
-rw-r--r--  arch/powerpc/net/bpf_jit.h              1
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c         6
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c      10
4 files changed, 18 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 8053b24afc39..55ca49d18319 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -571,6 +571,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
+#define PPC_RAW_RLDICL_DOT(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb) | 0x1)
#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index b4a5f58b7e3d..8334cd667bba 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -165,6 +165,7 @@ struct codegen_context {
unsigned int exentry_idx;
unsigned int alt_exit_addr;
u64 arena_vm_start;
+ u64 user_vm_start;
};
#define bpf_to_ppc(r) (ctx->b2p[r])
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 7d070232159f..cfa84cab0a18 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -205,6 +205,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
/* Make sure that the stack is quadword aligned. */
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
+ cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -439,6 +440,11 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}
+bool bpf_jit_supports_arena(void)
+{
+ return IS_ENABLED(CONFIG_PPC64);
+}
+
bool bpf_jit_supports_far_kfunc_call(void)
{
return IS_ENABLED(CONFIG_PPC64);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 569619f1b31c..76efb47f02a6 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -812,6 +812,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
*/
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+
+ if (insn_is_cast_user(&insn[i])) {
+ EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
+ PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
+ break;
+ }
+
if (imm == 1) {
/* special mov32 for zext */
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
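The five instructions emitted for the cast_user case above implement roughly the following semantics, shown as an illustrative C sketch (the helper name cast_user is made up for the example and does not appear in the patch):

/*
 * What the JITed sequence computes for dst = addr_space_cast(src, 1, 0)
 * on powerpc64:
 *
 *   rldicl. tmp1, src, 0, 32     ; keep lower 32 bits of src, set CR0
 *   <load upper 32 bits of user_vm_start into dst>
 *   beq     skip                 ; lower half was 0: NULL stays NULL
 *   or      tmp1, dst, tmp1      ; combine upper and lower halves
 * skip:
 *   mr      dst, tmp1
 */
static unsigned long cast_user(unsigned long src, unsigned long user_vm_start)
{
	unsigned long lo = src & 0xffffffffUL;

	if (!lo)
		return 0;	/* a NULL arena pointer must stay NULL */

	return (user_vm_start & 0xffffffff00000000UL) | lo;
}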