 arch/riscv/net/bpf_jit_comp64.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 15e482f2c657..1f0159963b3e 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -12,6 +12,7 @@
 #include <linux/stop_machine.h>
 #include <asm/patch.h>
 #include <asm/cfi.h>
+#include <asm/percpu.h>
 #include "bpf_jit.h"
 
 #define RV_FENTRY_NINSNS 2
@@ -1089,6 +1090,24 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 			emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
 			emit_mv(rd, RV_REG_T1, ctx);
 			break;
+		} else if (insn_is_mov_percpu_addr(insn)) {
+			if (rd != rs)
+				emit_mv(rd, rs, ctx);
+#ifdef CONFIG_SMP
+			/* Load current CPU number in T1 */
+			emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+				RV_REG_TP, ctx);
+			/* << 3 because offsets are 8 bytes */
+			emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
+			/* Load address of __per_cpu_offset array in T2 */
+			emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+			/* Add offset of current CPU to __per_cpu_offset */
+			emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+			/* Load __per_cpu_offset[cpu] in T1 */
+			emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
+			/* Add the offset to Rd */
+			emit_add(rd, rd, RV_REG_T1, ctx);
+#endif
 		}
 		if (imm == 1) {
 			/* Special mov32 for zext */
@@ -2038,3 +2057,8 @@ bool bpf_jit_supports_arena(void)
 {
 	return true;
 }
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+	return true;
+}
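
For reference, the instructions emitted under CONFIG_SMP compute rd += __per_cpu_offset[cpu], with the CPU number read from struct thread_info through the TP register. Below is a minimal stand-alone C sketch of that address computation; the struct layout, array size, and function names are illustrative stand-ins for the kernel definitions, not the kernel code itself.

#include <stdint.h>

/* Hypothetical stand-in for the kernel's struct thread_info;
 * only the cpu field matters for this sketch. */
struct thread_info_sketch {
	uint64_t cpu;			/* current CPU number */
};

/* Stand-in for the kernel's __per_cpu_offset[] array of
 * per-CPU base offsets (8 bytes per entry). */
#define NR_CPUS_SKETCH 4
static uint64_t per_cpu_offset_sketch[NR_CPUS_SKETCH];

/* Models the emitted sequence: load the CPU number (emit_ld from TP),
 * shift left by 3 for 8-byte entries (emit_slli), index the offset
 * array (emit_addr + emit_add + emit_ld), then add the offset to Rd
 * (final emit_add). */
static uint64_t resolve_percpu_addr(const struct thread_info_sketch *ti,
				    uint64_t rd)
{
	uint64_t idx = ti->cpu << 3;	/* byte offset into the array */
	uint64_t off = *(const uint64_t *)
		((const char *)per_cpu_offset_sketch + idx);
	return rd + off;		/* rd now holds the per-CPU address */
}

The bpf_jit_supports_percpu_insn() hook added at the end tells the BPF core that this JIT can translate the internal-only per-CPU MOV instruction, so the verifier may rewrite per-CPU address resolution into this instruction rather than leaving it as a helper call.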
