Diffstat (limited to 'drivers/net/ethernet/netronome/nfp')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile             |    4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c            |  788
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c           |   10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h           |   52
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c       |   21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c      |  412
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c        |   16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h        |  130
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c        |   13
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h        |   35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c       |  114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c    |    2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c     |   94
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c |  811
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c            |    1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h            |    8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c            |  257
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h            |  300
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c     |   39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h       |    2
20 files changed, 2564 insertions, 545 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 96e579a15cbe..bd3b2bd408bc 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -14,6 +14,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
@@ -37,7 +38,8 @@ nfp-objs += \
flower/main.o \
flower/match.o \
flower/metadata.o \
- flower/offload.o
+ flower/offload.o \
+ flower/tunnel_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 239dfbe8a0a1..23fb11a41cc4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -110,150 +110,7 @@ nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
return offset - nfp_prog->start_off;
}
-/* --- SW reg --- */
-struct nfp_insn_ur_regs {
- enum alu_dst_ab dst_ab;
- u16 dst;
- u16 areg, breg;
- bool swap;
- bool wr_both;
-};
-
-struct nfp_insn_re_regs {
- enum alu_dst_ab dst_ab;
- u8 dst;
- u8 areg, breg;
- bool swap;
- bool wr_both;
- bool i8;
-};
-
-static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_NNR:
- return UR_REG_NN | val;
- case NN_REG_XFER:
- return UR_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~0xff) {
- pr_err("immediate too large\n");
- return 0;
- }
- return UR_REG_IMM_encode(val);
- case NN_REG_NONE:
- return is_dst ? UR_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding %08x\n", swreg);
- return 0;
- }
-}
-
-static int
-swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_unreg(dst, true);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_unreg(rreg, false);
- reg->breg = nfp_swreg_to_unreg(lreg, false);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_unreg(lreg, false);
- reg->breg = nfp_swreg_to_unreg(rreg, false);
- }
-
- return 0;
-}
-
-static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_XFER:
- return RE_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~(0x7f | has_imm8 << 7)) {
- pr_err("immediate too large\n");
- return 0;
- }
- *i8 = val & 0x80;
- return RE_REG_IMM_encode(val & 0x7f);
- case NN_REG_NONE:
- return is_dst ? RE_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding\n");
- return 0;
- }
-}
-
-static int
-swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
- bool has_imm8)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- }
-
- return 0;
-}
-
/* --- Emitters --- */
-static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
- [CMD_TGT_WRITE8] = { 0x00, 0x42 },
- [CMD_TGT_READ8] = { 0x01, 0x43 },
- [CMD_TGT_READ_LE] = { 0x01, 0x40 },
- [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
-};
-
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
@@ -281,7 +138,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+ u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
struct nfp_insn_re_regs reg;
int err;
@@ -296,6 +153,11 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
nfp_prog->error = -EFAULT;
return;
}
+ if (reg.dst_lmextn || reg.src_lmextn) {
+ pr_err("cmd can't use LMextn\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
@@ -341,7 +203,7 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
- u8 byte, bool equal, u16 addr, u8 defer)
+ u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
u16 addr_lo, addr_hi;
u64 insn;
@@ -357,32 +219,34 @@ __emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
FIELD_PREP(OP_BB_EQ, equal) |
FIELD_PREP(OP_BB_DEFBR, defer) |
FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
- FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
+ FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
- u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+ swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
- defer);
+ defer, reg.src_lmextn);
}
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
- enum immed_shift shift, bool wr_both)
+ enum immed_shift shift, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -393,19 +257,21 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
FIELD_PREP(OP_IMMED_WIDTH, width) |
FIELD_PREP(OP_IMMED_INV, invert) |
FIELD_PREP(OP_IMMED_SHIFT, shift) |
- FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
+ FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
enum immed_width width, bool invert, enum immed_shift shift)
{
struct nfp_insn_ur_regs reg;
int err;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ if (swreg_type(dst) == NN_REG_IMM) {
nfp_prog->error = -EFAULT;
return;
}
@@ -417,13 +283,15 @@ emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
}
__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both);
+ invert, shift, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
enum shf_sc sc, u8 shift,
- u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -445,14 +313,16 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_SHF_SHIFT, shift) |
FIELD_PREP(OP_SHF_OP, op) |
FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
- FIELD_PREP(OP_SHF_WR_AB, wr_both);
+ FIELD_PREP(OP_SHF_WR_AB, wr_both) |
+ FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
- enum shf_sc sc, u8 shift)
+emit_shf(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
struct nfp_insn_re_regs reg;
int err;
@@ -464,12 +334,14 @@ emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
}
__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
- reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
- u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -480,13 +352,16 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_ALU_SW, swap) |
FIELD_PREP(OP_ALU_OP, op) |
FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
- FIELD_PREP(OP_ALU_WR_AB, wr_both);
+ FIELD_PREP(OP_ALU_WR_AB, wr_both) |
+ FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+emit_alu(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum alu_op op, swreg rreg)
{
struct nfp_insn_ur_regs reg;
int err;
@@ -498,13 +373,15 @@ emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
}
__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
- reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
- bool zero, bool swap, bool wr_both)
+ bool zero, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -517,33 +394,42 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
FIELD_PREP(OP_LDF_ZF, zero) |
FIELD_PREP(OP_LDF_BMASK, bmask) |
FIELD_PREP(OP_LDF_SHF, shift) |
- FIELD_PREP(OP_LDF_WR_AB, wr_both);
+ FIELD_PREP(OP_LDF_WR_AB, wr_both) |
+ FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
- u32 dst, u8 bmask, u32 src, bool zero)
+emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
+ enum shf_sc sc, u8 shift, bool zero)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ /* Note: ld_field is special as it uses one of the src regs as dst */
+ err = swreg_to_restricted(dst, dst, src, &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
- reg.i8, zero, reg.swap, reg.wr_both);
+ reg.i8, zero, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
-emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
enum shf_sc sc, u8 shift)
{
- emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+ emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
+}
+
+static void emit_nop(struct nfp_prog *nfp_prog)
+{
+ __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}
/* --- Wrappers --- */
@@ -565,7 +451,7 @@ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
return true;
}
-static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
enum immed_shift shift;
u16 val;
@@ -586,7 +472,7 @@ static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
*/
-static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(UR_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -599,7 +485,7 @@ static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
*/
-static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(RE_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -618,78 +504,134 @@ wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
FIELD_PREP(OP_BR_SPECIAL, special);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
- emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
- u16 src, bool src_valid, u8 size)
+data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
- u32 tmp_reg;
/* We load the value from the address indicated in @offset and then
* shift out the data we don't need. Note: this is big endian!
*/
- sz = size < 4 ? 4 : size;
+ sz = max(size, 4);
shift = size < 4 ? 4 - size : 0;
- if (src_valid) {
- /* Calculate the true offset (src_reg + imm) */
- tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_alu(nfp_prog, imm_both(nfp_prog),
- reg_a(src), ALU_OP_ADD, tmp_reg);
- /* Check packet length (size guaranteed to fit b/c it's u8) */
- emit_alu(nfp_prog, imm_a(nfp_prog),
- imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
- } else {
- /* Check packet length */
- tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
- imm_a(nfp_prog));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
- }
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pptr_reg(nfp_prog), offset, sz - 1, true);
i = 0;
if (shift)
- emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
reg_xfer(0), SHF_SC_R_SHF, shift * 8);
else
for (; i * 4 < size; i++)
- emit_alu(nfp_prog, reg_both(i),
- reg_none(), ALU_OP_NONE, reg_xfer(i));
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+
+ return 0;
+}
+
+static int
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, int size)
+{
+ unsigned int i;
+ u8 mask, sz;
+
+ /* We load the value from the address indicated in @offset and then
+ * mask out the data we don't need. Note: this is little endian!
+ */
+ sz = max(size, 4);
+ mask = size < 4 ? GENMASK(size - 1, 0) : 0;
+
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
+ reg_a(src_gpr), offset, sz / 4 - 1, true);
+
+ i = 0;
+ if (mask)
+ emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
+ reg_xfer(0), SHF_SC_NONE, 0, true);
+ else
+ for (; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(1), 0);
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
return 0;
}
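
The two loaders above differ only in how the surplus bytes of the 4-byte-minimum read are discarded, per the big-/little-endian notes in their comments. A standalone sketch of the two extraction steps, modelling a transfer register word as a plain uint32_t:

#include <stdint.h>

/* data_ld() path (CMD_TGT_READ8): the wanted bytes sit at the top of
 * the big-endian xfer word, so a right shift of (4 - size) * 8 drops
 * the rest. */
static uint32_t extract_be(uint32_t xfer, unsigned int size)
{
	unsigned int shift = size < 4 ? (4 - size) * 8 : 0;

	return xfer >> shift;
}

/* data_ld_host_order() path (CMD_TGT_READ32_SWAP): bytes arrive in
 * host order, so keeping the low "size" bytes -- what the ld_field
 * byte mask GENMASK(size - 1, 0) selects with zero fill -- is enough. */
static uint32_t extract_le(uint32_t xfer, unsigned int size)
{
	uint32_t keep = size < 4 ? (1u << (size * 8)) - 1 : ~0u;

	return xfer & keep;
}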
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+{
+ swreg tmp_reg;
+
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
+
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+}
+
static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
- return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+ swreg tmp_reg;
+
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ return data_ld(nfp_prog, tmp_reg, 0, size);
+}
+
+static int
+data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u8 src_gpr, u8 size)
+{
+ unsigned int i;
+
+ for (i = 0; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
+
+ return 0;
}
-static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+static int
+data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u64 imm, u8 size)
{
- emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
- reg_none(), ALU_OP_NONE, reg_b(src));
- emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
- NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+ wrp_immed(nfp_prog, reg_xfer(0), imm);
+ if (size == 8)
+ wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
return 0;
}
@@ -697,7 +639,7 @@ static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
- u32 tmp_reg;
+ swreg tmp_reg;
if (alu_op == ALU_OP_AND) {
if (!imm)
@@ -815,7 +757,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
u8 reg = insn->dst_reg * 2;
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -844,7 +786,10 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum br_mask br_mask, bool swap)
{
const struct bpf_insn *insn = &meta->insn;
- u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+ u8 areg, breg;
+
+ areg = insn->dst_reg * 2;
+ breg = insn->src_reg * 2;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -863,6 +808,14 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
+static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
+{
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
+ SHF_SC_R_ROT, 16);
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -967,12 +920,24 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, 32 - insn->imm);
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst + 1, dst);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ }
return 0;
}
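
For reference, the 64-bit semantics the three branches above implement over a 32-bit register pair (low word in dst, high word in dst + 1); the imm < 32 case is what the SHF_SC_R_DSHF double shift computes in a single instruction. A standalone model (imm == 0 handled explicitly here for completeness; shr_imm64() below is the mirror image, funnelling high-word bits into the low word):

#include <stdint.h>

static void shl64_pair(uint32_t *lo, uint32_t *hi, unsigned int imm)
{
	if (imm == 0)
		return;

	if (imm < 32) {
		/* hi takes its own shifted bits plus the bits spilling
		 * out of lo -- the double (funnel) shift */
		*hi = (*hi << imm) | (*lo >> (32 - imm));
		*lo <<= imm;
	} else if (imm == 32) {
		*hi = *lo;		/* degenerates to a move */
		*lo = 0;
	} else {			/* 32 < imm < 64 */
		*hi = *lo << (imm - 32);
		*lo = 0;
	}
}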
@@ -980,12 +945,24 @@ static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, insn->imm);
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst, dst + 1);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ }
return 0;
}
@@ -1075,6 +1052,35 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 gpr = insn->dst_reg * 2;
+
+ switch (insn->imm) {
+ case 16:
+ emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
+ SHF_SC_R_SHF, 16);
+
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 32:
+ wrp_end32(nfp_prog, reg_a(gpr), gpr);
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 64:
+ wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
+
+ wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
+ wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
+ break;
+ }
+
+ return 0;
+}
+
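Worked through, the two ld_field rotations in wrp_end32() compose into a full 32-bit byte swap: byte mask 0xf first writes all four bytes of the source rotated right by 8, then mask 0x5 overwrites bytes 0 and 2 with that result rotated right by 16. A standalone check of that claim:

#include <assert.h>
#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

static uint32_t bswap32_model(uint32_t x)
{
	uint32_t v = ror32(x, 8);	/* ld_field, bmask 0xf */
	uint32_t r = ror32(v, 16);

	/* ld_field, bmask 0x5: replace bytes 0 and 2, keep 1 and 3 */
	return (v & 0xff00ff00u) | (r & 0x00ff00ffu);
}

int main(void)
{
	assert(bswap32_model(0x11223344u) == 0x44332211u);
	return 0;
}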
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
@@ -1111,82 +1117,209 @@ static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 1);
+ meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 2);
+ meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 4);
+ meta->insn.src_reg * 2, 4);
}
-static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
{
- if (meta->insn.off == offsetof(struct sk_buff, len))
- emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
- else
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
+
+ switch (meta->insn.off) {
+ case offsetof(struct sk_buff, len):
+ if (size != FIELD_SIZEOF(struct sk_buff, len))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
+ break;
+ case offsetof(struct sk_buff, data):
+ if (size != sizeof(void *))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct sk_buff, cb) +
+ offsetof(struct bpf_skb_data_end, data_end):
+ if (size != sizeof(void *))
+ return -EOPNOTSUPP;
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
return 0;
}
-static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
{
- u32 dst = reg_both(meta->insn.dst_reg * 2);
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
- if (meta->insn.off != offsetof(struct xdp_md, data) &&
- meta->insn.off != offsetof(struct xdp_md, data_end))
+ if (size != sizeof(void *))
+ return -EINVAL;
+
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_buff, data):
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct xdp_buff, data_end):
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
- if (meta->insn.off == offsetof(struct xdp_md, data))
- return 0;
+ return 0;
+}
- emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
+static int
+mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
- return 0;
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
+ meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_CTX) {
+ if (nfp_prog->act == NN_ACT_XDP)
+ return mem_ldx_xdp(nfp_prog, meta, size);
+ else
+ return mem_ldx_skb(nfp_prog, meta, size);
+ }
+
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_ldx_data(nfp_prog, meta, size);
+
+ return -EOPNOTSUPP;
+}
+
+static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 1);
+}
+
+static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 2);
}
static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- int ret;
+ return mem_ldx(nfp_prog, meta, 4);
+}
- if (nfp_prog->act == NN_ACT_XDP)
- ret = mem_ldx4_xdp(nfp_prog, meta);
- else
- ret = mem_ldx4_skb(nfp_prog, meta);
+static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 8);
+}
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+static int
+mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+ swreg off_reg;
- return ret;
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ imm, size);
}
-static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
{
- if (meta->insn.off == offsetof(struct sk_buff, mark))
- return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_st_data(nfp_prog, meta, size);
return -EOPNOTSUPP;
}
-static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 1);
+}
+
+static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ return mem_st(nfp_prog, meta, 2);
+}
+
+static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 4);
+}
+
+static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 8);
+}
+
+static int
+mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg off_reg;
+
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ meta->insn.src_reg * 2, size);
+}
+
+static int
+mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_stx_data(nfp_prog, meta, size);
+
return -EOPNOTSUPP;
}
+static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 1);
+}
+
+static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 2);
+}
+
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (nfp_prog->act == NN_ACT_XDP)
- return mem_stx4_xdp(nfp_prog, meta);
- return mem_stx4_skb(nfp_prog, meta);
+ return mem_stx(nfp_prog, meta, 4);
+}
+
+static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 8);
}
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1202,8 +1335,10 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
- u32 tmp_reg;
+ swreg or1, or2, tmp_reg;
+
+ or1 = reg_a(insn->dst_reg * 2);
+ or2 = reg_b(insn->dst_reg * 2 + 1);
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1230,29 +1365,29 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
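
The four rewrites above correct which branch mask and operand order encode each unsigned comparison. A standalone model of the new mapping, assuming swap reverses the subtraction so the borrow test reads imm - dst rather than dst - imm (BR_BLO branches on borrow, BR_BHS on no borrow):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool cmp_branch(uint64_t dst, uint64_t imm, bool br_blo, bool swap)
{
	uint64_t a = swap ? imm : dst;
	uint64_t b = swap ? dst : imm;
	bool borrow = a < b;		/* borrow out of a - b */

	return br_blo ? borrow : !borrow;
}

int main(void)
{
	assert(cmp_branch(5, 3, true, true)   == (5 >  3)); /* JGT: BLO, swap */
	assert(cmp_branch(3, 3, false, false) == (3 >= 3)); /* JGE: BHS */
	assert(cmp_branch(3, 5, true, false)  == (3 <  5)); /* JLT: BLO */
	assert(cmp_branch(3, 3, false, true)  == (3 <= 3)); /* JLE: BHS, swap */
	return 0;
}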
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1283,7 +1418,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1292,6 +1427,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
}
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
@@ -1327,22 +1463,22 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1390,6 +1526,7 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
[BPF_LD | BPF_ABS | BPF_H] = data_ld2,
@@ -1397,8 +1534,18 @@ static const instr_cb_t instr_cb[256] = {
[BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
[BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
[BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
+ [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
+ [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
+ [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
+ [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
+ [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
+ [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
+ [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
@@ -1510,8 +1657,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
static void nfp_intro(struct nfp_prog *nfp_prog)
{
- emit_alu(nfp_prog, pkt_reg(nfp_prog),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
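
The rewritten prologue stops copying the length from an ABI register and instead masks it out of the packet-vector length word in local memory. A one-line model, on the assumption (per the GENMASK(13, 0) above) that the length occupies the low 14 bits of that word:

#include <stdint.h>

static uint32_t decode_pkt_len(uint32_t pv_len_word)
{
	return pv_len_word & 0x3fffu;	/* GENMASK(13, 0): packet length */
}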
static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
@@ -1534,8 +1682,7 @@ static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
* ife + tx 0x24 -> redir, count as stat1
*/
emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
@@ -1562,8 +1709,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1572,8 +1718,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* if R0 > 7 jump to abort */
emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
wrp_immed(nfp_prog, reg_b(2), 0x41221211);
wrp_immed(nfp_prog, reg_b(3), 0x41001211);
@@ -1610,8 +1755,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1632,8 +1776,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
@@ -1656,7 +1799,7 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
static int nfp_translate(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
- int err;
+ int i, err;
nfp_intro(nfp_prog);
if (nfp_prog->error)
@@ -1688,6 +1831,11 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
if (nfp_prog->error)
return nfp_prog->error;
+ for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++)
+ emit_nop(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
return nfp_fixup_branches(nfp_prog);
}
@@ -1737,38 +1885,6 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
-/* Try to rename registers so that program uses only low ones */
-static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
-{
- bool reg_used[MAX_BPF_REG] = {};
- u8 tgt_reg[MAX_BPF_REG] = {};
- struct nfp_insn_meta *meta;
- unsigned int i, j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
- continue;
-
- reg_used[meta->insn.src_reg] = true;
- reg_used[meta->insn.dst_reg] = true;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
- if (!reg_used[i])
- continue;
-
- tgt_reg[i] = j++;
- }
- nfp_prog->num_regs = j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
- meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
- }
-
- return 0;
-}
-
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -1845,20 +1961,33 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
- int ret;
-
nfp_bpf_opt_reg_init(nfp_prog);
- ret = nfp_bpf_opt_reg_rename(nfp_prog);
- if (ret)
- return ret;
-
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
return 0;
}
+static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+{
+ int i;
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ int err;
+
+ err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ if (err)
+ return err;
+
+ nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
+
+ ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ }
+
+ return 0;
+}
+
/**
* nfp_bpf_jit() - translate BPF code into NFP assembly
* @filter: kernel BPF filter struct
@@ -1899,10 +2028,8 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
if (ret)
goto out;
- if (nfp_prog->num_regs <= 7)
- nfp_prog->regs_per_thread = 16;
- else
- nfp_prog->regs_per_thread = 32;
+ nfp_prog->num_regs = MAX_BPF_REG;
+ nfp_prog->regs_per_thread = 32;
nfp_prog->prog = prog_mem;
nfp_prog->__prog_alloc_len = prog_sz;
@@ -1912,10 +2039,13 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
ret = -EINVAL;
+ goto out;
}
+ ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);
+
res->n_instr = nfp_prog->prog_len;
- res->dense_mode = nfp_prog->num_regs <= 7;
+ res->dense_mode = false;
out:
nfp_prog_free(nfp_prog);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index be2cf10a2cd7..6e74f8db1cc1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -42,9 +42,11 @@
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
+#ifdef __LITTLE_ENDIAN
if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
return true;
+#endif
return false;
}
@@ -89,14 +91,6 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
struct nfp_net_bpf_priv *priv;
int ret;
- /* Limit to single port, otherwise it's just a NIC */
- if (id > 0) {
- nfp_warn(app->cpp,
- "BPF NIC doesn't support more than one port right now\n");
- nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
- return PTR_ERR_OR_ZERO(nn->port);
- }
-
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4051e943f363..d77e88a45409 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -36,9 +36,11 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/list.h>
#include <linux/types.h>
+#include "../nfp_asm.h"
#include "../nfp_net.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch
@@ -53,9 +55,13 @@ enum br_special {
};
enum static_regs {
- STATIC_REG_PKT = 1,
-#define REG_PKT_BANK ALU_DST_A
- STATIC_REG_IMM = 2, /* Bank AB */
+ STATIC_REG_IMM = 21, /* Bank AB */
+ STATIC_REG_PKT_LEN = 22, /* Bank B */
+};
+
+enum pkt_vec {
+ PKT_VEC_PKT_LEN = 0,
+ PKT_VEC_PKT_PTR = 2,
};
enum nfp_bpf_action_type {
@@ -65,39 +71,17 @@ enum nfp_bpf_action_type {
NN_ACT_XDP,
};
-/* Software register representation, hardware encoding in asm.h */
-#define NN_REG_TYPE GENMASK(31, 24)
-#define NN_REG_VAL GENMASK(7, 0)
-
-enum nfp_bpf_reg_type {
- NN_REG_GPR_A = BIT(0),
- NN_REG_GPR_B = BIT(1),
- NN_REG_NNR = BIT(2),
- NN_REG_XFER = BIT(3),
- NN_REG_IMM = BIT(4),
- NN_REG_NONE = BIT(5),
-};
-
-#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
-
-#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
-#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
-#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
-#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
-#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
-#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
-#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
+#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
-#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
-#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
+#define pptr_reg(np) pv_ctm_ptr(np)
+#define imm_a(np) reg_a(STATIC_REG_IMM)
+#define imm_b(np) reg_b(STATIC_REG_IMM)
+#define imm_both(np) reg_both(STATIC_REG_IMM)
-#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
-#define NFP_BPF_ABI_MARK reg_nnr(1)
-#define NFP_BPF_ABI_PKT reg_nnr(2)
-#define NFP_BPF_ABI_LEN reg_nnr(3)
struct nfp_prog;
struct nfp_insn_meta;
@@ -113,6 +97,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
+ * @ptr: pointer type for memory operations
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @skip: skip this instruction (optimized out)
@@ -121,6 +106,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
*/
struct nfp_insn_meta {
struct bpf_insn insn;
+ struct bpf_reg_state ptr;
unsigned int off;
unsigned short n;
bool skip;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 5b783a91b115..e361c0e3b788 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -112,12 +112,19 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
static int
-nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
- const struct bpf_verifier_env *env, u8 reg)
+nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ const struct bpf_verifier_env *env, u8 reg)
{
- if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ if (env->cur_state.regs[reg].type != PTR_TO_CTX &&
+ env->cur_state.regs[reg].type != PTR_TO_PACKET)
return -EINVAL;
+ if (meta->ptr.type != NOT_INIT &&
+ meta->ptr.type != env->cur_state.regs[reg].type)
+ return -EINVAL;
+
+ meta->ptr = env->cur_state.regs[reg];
+
return 0;
}
@@ -145,11 +152,11 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return nfp_bpf_check_exit(priv->prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.src_reg);
+ return nfp_bpf_check_ptr(priv->prog, meta, env,
+ meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.dst_reg);
+ return nfp_bpf_check_ptr(priv->prog, meta, env,
+ meta->insn.dst_reg);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index db9750695dc7..1194c47ef827 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -36,7 +36,9 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
#include "cmsg.h"
#include "main.h"
@@ -80,14 +82,27 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
+static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (!out_dev->rtnl_link_ops)
+ return false;
+
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+
+ return false;
+}
+
static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
struct nfp_fl_payload *nfp_flow, bool last,
- struct net_device *in_dev)
+ struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
+ int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
+ u16 tmp_output_op, tmp_flags;
struct net_device *out_dev;
- u16 tmp_output_op;
int ifindex;
/* Set action opcode to output action. */
@@ -97,25 +112,355 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
output->a_op = cpu_to_be16(tmp_output_op);
- /* Set action output parameters. */
- output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
-
ifindex = tcf_mirred_ifindex(action);
out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
if (!out_dev)
return -EOPNOTSUPP;
- /* Only offload egress ports are on the same device as the ingress
- * port.
+ tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
+
+ if (tun_type) {
+ /* Verify the egress netdev matches the tunnel type. */
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ return -EOPNOTSUPP;
+
+ if (*tun_out_cnt)
+ return -EOPNOTSUPP;
+ (*tun_out_cnt)++;
+
+ output->flags = cpu_to_be16(tmp_flags |
+ NFP_FL_OUT_FLAGS_USE_TUN);
+ output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ } else {
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(tmp_flags);
+
+ /* Only offload if egress ports are on the same device as the
+ * ingress port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+ }
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+{
+ struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+
+ return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+}
+
+static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
+{
+ size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
+ struct nfp_fl_pre_tunnel *pre_tun_act;
+ u16 tmp_pre_tun_op;
+
+ /* Pre_tunnel action must be first on action list.
+ * If other actions already exist they need to be pushed forward.
*/
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (act_len)
+ memmove(act_data + act_size, act_data, act_len);
+
+ pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;
+
+ memset(pre_tun_act, 0, act_size);
+
+ tmp_pre_tun_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PRE_TUNNEL);
+
+ pre_tun_act->a_op = cpu_to_be16(tmp_pre_tun_op);
+
+ return pre_tun_act;
+}
+
+static int
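Because the pre-tunnel action must come first, the memmove() above shifts any actions already compiled into the buffer up by one action's size before the new action is written at offset zero. A self-contained model of that prepend:

#include <string.h>

/* act_data[0 .. act_len) holds previously compiled actions; open up
 * act_size bytes at the front and zero them for the new action. */
static void prepend_action(char *act_data, int act_len, size_t act_size)
{
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	memset(act_data, 0, act_size);
}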
+nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun)
+{
+ struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
+ size_t act_size = sizeof(struct nfp_fl_set_vxlan);
+ u32 tmp_set_vxlan_type_index = 0;
+ u16 tmp_set_vxlan_op;
+ /* Currently support one pre-tunnel so index is always 0. */
+ int pretun_idx = 0;
+
+ if (vxlan->options_len) {
+ /* Do not support options e.g. vxlan gpe. */
+ return -EOPNOTSUPP;
+ }
+
+ tmp_set_vxlan_op =
+ FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID,
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL);
+
+ set_vxlan->a_op = cpu_to_be16(tmp_set_vxlan_op);
+
+ /* Set tunnel type and pre-tunnel index. */
+ tmp_set_vxlan_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+
+ set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
+
+ set_vxlan->tun_id = vxlan->key.tun_id;
+ set_vxlan->tun_flags = vxlan->key.tun_flags;
+ set_vxlan->ipv4_ttl = vxlan->key.ttl;
+ set_vxlan->ipv4_tos = vxlan->key.tos;
+
+ /* Complete pre_tunnel action. */
+ pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+
+ return 0;
+}
+
+static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
+{
+ u32 oldvalue = get_unaligned((u32 *)p_exact);
+ u32 oldmask = get_unaligned((u32 *)p_mask);
+
+ value &= mask;
+ value |= oldvalue & ~mask;
+
+ put_unaligned(oldmask | mask, (u32 *)p_mask);
+ put_unaligned(value, (u32 *)p_exact);
+}
+
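The helper folds successive pedit SET keys into one accumulated value/mask pair; note the callers pass the inverted tc mask, so mask here means "bits being set". A hypothetical two-key accumulation over the same 32-bit word (values are made up):

u8 exact[4] = {}, mask[4] = {};

nfp_fl_set_helper32(0x0000beef, 0x0000ffff, exact, mask);	/* low half */
nfp_fl_set_helper32(0xdead0000, 0xffff0000, exact, mask);	/* high half */
/* exact now reads back (via get_unaligned) as 0xdeadbeef,
 * mask as 0xffffffff */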
+static int
+nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_eth *set_eth)
+{
+ u16 tmp_set_eth_op;
+ u32 exact, mask;
+
+ if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
return -EOPNOTSUPP;
- nfp_flow->meta.shortcut = output->port;
+ nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
+ &set_eth->eth_addr_mask[off]);
+
+ set_eth->reserved = cpu_to_be16(0);
+ tmp_set_eth_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_eth) >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID,
+ NFP_FL_ACTION_OPCODE_SET_ETHERNET);
+ set_eth->a_op = cpu_to_be16(tmp_set_eth_op);
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ip4_addrs *set_ip_addr)
+{
+ u16 tmp_set_ipv4_op;
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ switch (off) {
+ case offsetof(struct iphdr, daddr):
+ set_ip_addr->ipv4_dst_mask = mask;
+ set_ip_addr->ipv4_dst = exact;
+ break;
+ case offsetof(struct iphdr, saddr):
+ set_ip_addr->ipv4_src_mask = mask;
+ set_ip_addr->ipv4_src = exact;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_ip_addr->reserved = cpu_to_be16(0);
+ tmp_set_ipv4_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID,
+ NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS);
+ set_ip_addr->a_op = cpu_to_be16(tmp_set_ipv4_op);
+
+ return 0;
+}
+
+static void
+nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_addr *ip6)
+{
+ u16 tmp_set_op;
+
+ ip6->ipv6[idx % 4].mask = mask;
+ ip6->ipv6[idx % 4].exact = exact;
+
+ ip6->reserved = cpu_to_be16(0);
+ tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, sizeof(*ip6) >>
+ NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode_tag);
+ ip6->a_op = cpu_to_be16(tmp_set_op);
+}
+
+static int
+nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ipv6_addr *ip_dst,
+ struct nfp_fl_set_ipv6_addr *ip_src)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ if (off < offsetof(struct ipv6hdr, saddr))
+ return -EOPNOTSUPP;
+ else if (off < offsetof(struct ipv6hdr, daddr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ exact, mask, ip_src);
+ else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ exact, mask, ip_dst);
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
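The offset checks above partition the pedit offset space according to struct ipv6hdr layout, where saddr starts at byte 8 and daddr at byte 24, each key patching one 32-bit word (the idx % 4 slot) of the chosen address. A standalone model of that dispatch:

#include <stdint.h>

enum ip6_tgt { IP6_REJECT, IP6_SRC, IP6_DST };

/* saddr occupies header bytes [8, 24), daddr bytes [24, 40) */
static enum ip6_tgt ip6_pedit_target(uint32_t off)
{
	if (off < 8)
		return IP6_REJECT;	/* before saddr: unsupported */
	if (off < 24)
		return IP6_SRC;
	if (off < 40)
		return IP6_DST;
	return IP6_REJECT;		/* past daddr: unsupported */
}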
+static int
+nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_tport *set_tport, int opcode)
+{
+ u32 exact, mask;
+ u16 tmp_set_op;
+
+ if (off)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
+ set_tport->tp_port_mask);
+
+ set_tport->reserved = cpu_to_be16(0);
+ tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_tport) >> NFP_FL_LW_SIZ);
+ tmp_set_op |= FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode);
+ set_tport->a_op = cpu_to_be16(tmp_set_op);
+
+ return 0;
+}
+
+static int
+nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+{
+ struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ip4_addrs set_ip_addr;
+ struct nfp_fl_set_tport set_tport;
+ struct nfp_fl_set_eth set_eth;
+ enum pedit_header_type htype;
+ int idx, nkeys, err;
+ size_t act_size;
+ u32 offset, cmd;
+
+ memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
+ memset(&set_ip6_src, 0, sizeof(set_ip6_src));
+ memset(&set_ip_addr, 0, sizeof(set_ip_addr));
+ memset(&set_tport, 0, sizeof(set_tport));
+ memset(&set_eth, 0, sizeof(set_eth));
+ nkeys = tcf_pedit_nkeys(action);
+
+ for (idx = 0; idx < nkeys; idx++) {
+ cmd = tcf_pedit_cmd(action, idx);
+ htype = tcf_pedit_htype(action, idx);
+ offset = tcf_pedit_offset(action, idx);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
+ return -EOPNOTSUPP;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(action, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
+ &set_ip6_src);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (err)
+ return err;
+ }
+
+ if (set_eth.a_op) {
+ act_size = sizeof(set_eth);
+ memcpy(nfp_action, &set_eth, act_size);
+ *a_len += act_size;
+ } else if (set_ip_addr.a_op) {
+ act_size = sizeof(set_ip_addr);
+ memcpy(nfp_action, &set_ip_addr, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.a_op && set_ip6_src.a_op) {
+ /* TC compiles set src and dst IPv6 address as a single action,
+ * the hardware requires this to be 2 separate actions.
+ */
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+
+ act_size = sizeof(set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
+ act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.a_op) {
+ act_size = sizeof(set_ip6_dst);
+ memcpy(nfp_action, &set_ip6_dst, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_src.a_op) {
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+ } else if (set_tport.a_op) {
+ act_size = sizeof(set_tport);
+ memcpy(nfp_action, &set_tport, act_size);
+ *a_len += act_size;
+ }
return 0;
}
@@ -123,8 +468,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
static int
nfp_flower_loop_action(const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -137,7 +485,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -147,7 +496,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -170,6 +520,32 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
+ } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ /* Pre-tunnel action is required for tunnel encap.
+ * This checks for next hop entries on NFP.
+ * If none, the packet falls back before applying other actions.
+ */
+ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
+ sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ *a_len += sizeof(struct nfp_fl_pre_tunnel);
+
+ s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_set_vxlan);
+ } else if (is_tcf_tunnel_release(a)) {
+ /* Tunnel decap is handled by default so accept action. */
+ return 0;
+ } else if (is_tcf_pedit(a)) {
+ if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ return -EOPNOTSUPP;
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -182,18 +558,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err;
+ int act_len, act_cnt, err, tun_out_cnt;
+ enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
+ tun_type = NFP_FL_TUNNEL_NONE;
act_len = 0;
act_cnt = 0;
+ tun_out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
+ &tun_type, &tun_out_cnt);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index c3ca05d10fe1..6b71c719deba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -38,17 +38,10 @@
#include <net/dst_metadata.h>
#include "main.h"
-#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"
-#define nfp_flower_cmsg_warn(app, fmt, args...) \
- do { \
- if (net_ratelimit()) \
- nfp_warn((app)->cpp, fmt, ## args); \
- } while (0)
-
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
@@ -188,6 +181,15 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
nfp_flower_rx_flow_stats(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
+ nfp_tunnel_request_route(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
+ nfp_tunnel_keep_alive(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
+ /* Acks from the NFP that the route is added - ignore. */
+ break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2ec60344236..f7b7242a22bc 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include "../nfp_app.h"
+#include "../nfpcore/nfp_cpp.h"
#define NFP_FLOWER_LAYER_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
@@ -56,6 +57,11 @@
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
+#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -67,10 +73,18 @@
#define NFP_FL_LW_SIZ 2
/* Action opcodes */
-#define NFP_FL_ACTION_OPCODE_OUTPUT 0
-#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
-#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
-#define NFP_FL_ACTION_OPCODE_NUM 32
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
@@ -83,6 +97,54 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+/* Tunnel ports */
+#define NFP_FL_PORT_TYPE_TUN 0x50000000
+#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+enum nfp_flower_tun_type {
+ NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_VXLAN = 2,
+};
+
+struct nfp_fl_set_eth {
+ __be16 a_op;
+ __be16 reserved;
+ u8 eth_addr_mask[ETH_ALEN * 2];
+ u8 eth_addr_val[ETH_ALEN * 2];
+};
+
+struct nfp_fl_set_ip4_addrs {
+ __be16 a_op;
+ __be16 reserved;
+ __be32 ipv4_src_mask;
+ __be32 ipv4_src;
+ __be32 ipv4_dst_mask;
+ __be32 ipv4_dst;
+};
+
+struct nfp_fl_set_ipv6_addr {
+ __be16 a_op;
+ __be16 reserved;
+ struct {
+ __be32 mask;
+ __be32 exact;
+ } ipv6[4];
+};
+
+struct nfp_fl_set_tport {
+ __be16 a_op;
+ __be16 reserved;
+ u8 tp_port_mask[4];
+ u8 tp_port_val[4];
+};
+
struct nfp_fl_output {
__be16 a_op;
__be16 flags;
@@ -115,6 +177,25 @@ struct nfp_flower_meta_one {
u16 reserved;
};
+struct nfp_fl_pre_tunnel {
+ __be16 a_op;
+ __be16 reserved;
+ __be32 ipv4_dst;
+ /* reserved for use with IPv6 addresses */
+ __be32 extra[3];
+};
+
+struct nfp_fl_set_vxlan {
+ __be16 a_op;
+ __be16 reserved;
+ __be64 tun_id;
+ __be32 tun_type_index;
+ __be16 tun_flags;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be32 extra[2];
+} __packed;
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -230,6 +311,36 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | gpe_flags | Reserved | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vxlan {
+ __be32 ip_src;
+ __be32 ip_dst;
+ __be16 tun_flags;
+ u8 tos;
+ u8 ttl;
+ u8 gpe_flags;
+ u8 reserved[2];
+ u8 nxt_proto;
+ __be32 tun_id;
+};
+
+#define NFP_FL_TUN_VNI_OFFSET 8
+
/* The base header for a control message packet.
* Defines an 8-bit version, and an 8-bit type, padded
* to a 32-bit word. Rest of the packet is type-specific.
@@ -249,6 +360,11 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
@@ -282,6 +398,7 @@ enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3,
};
enum nfp_flower_cmsg_port_vnic_type {
@@ -323,6 +440,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
}
+static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
+{
+ return skb->len - NFP_FLOWER_CMSG_HLEN;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 91fe03617106..e46e7c60d491 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -436,6 +436,16 @@ static void nfp_flower_clean(struct nfp_app *app)
app->priv = NULL;
}
+static int nfp_flower_start(struct nfp_app *app)
+{
+ return nfp_tunnel_config_start(app);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_tunnel_config_stop(app);
+}
+
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
@@ -453,6 +463,9 @@ const struct nfp_app_type app_flower = {
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c20dd00a1cae..12c319a219d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -58,6 +58,8 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
+#define NFP_FL_VXLAN_PORT 4789
+
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
struct timespec64 *last_used;
@@ -82,6 +84,18 @@ struct nfp_fl_stats_id {
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs: List of skbs for control message processing
+ * @nfp_mac_off_list: List of MAC addresses to offload
+ * @nfp_mac_index_list: List of unique 8-bit indexes for non-NFP netdevs
+ * @nfp_ipv4_off_list: List of IPv4 addresses to offload
+ * @nfp_neigh_off_list: List of neighbour offloads
+ * @nfp_mac_off_lock: Lock for the MAC address list
+ * @nfp_mac_index_lock: Lock for the MAC index list
+ * @nfp_ipv4_off_lock: Lock for the IPv4 address list
+ * @nfp_neigh_off_lock: Lock for the neighbour address list
+ * @nfp_mac_off_ids: IDA to manage ID assignment for offloaded MACs
+ * @nfp_mac_off_count: Number of MACs in address list
+ * @nfp_tun_mac_nb: Notifier to monitor link state
+ * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -94,6 +108,18 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs;
+ struct list_head nfp_mac_off_list;
+ struct list_head nfp_mac_index_list;
+ struct list_head nfp_ipv4_off_list;
+ struct list_head nfp_neigh_off_list;
+ struct mutex nfp_mac_off_lock;
+ struct mutex nfp_mac_index_lock;
+ struct mutex nfp_ipv4_off_lock;
+ struct mutex nfp_neigh_off_lock;
+ struct ida nfp_mac_off_ids;
+ int nfp_mac_off_count;
+ struct notifier_block nfp_tun_mac_nb;
+ struct notifier_block nfp_tun_neigh_nb;
};
struct nfp_fl_key_ls {
@@ -126,6 +152,7 @@ struct nfp_fl_payload {
struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
+ __be32 nfp_tun_ipv4_addr;
char *unmasked_data;
char *mask_data;
char *action_data;
@@ -163,4 +190,12 @@ nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+int nfp_tunnel_config_start(struct nfp_app *app);
+void nfp_tunnel_config_stop(struct nfp_app *app);
+void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index d25b5038c3a2..60614d4f0e22 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -77,14 +77,17 @@ nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version)
+ bool mask_version, enum nfp_flower_tun_type tun_type)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
return 0;
}
- frame->in_port = cpu_to_be32(cmsg_port);
+ if (tun_type)
+ frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ else
+ frame->in_port = cpu_to_be32(cmsg_port);
return 0;
}
@@ -108,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
- if (mask_version)
- frame->mpls_lse = cpu_to_be32(~0);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_dissector_key_mpls *mpls;
+ u32 t_mpls;
+
+ mpls = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_MPLS,
+ target);
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ frame->mpls_lse = cpu_to_be32(t_mpls);
+ }
}
static void
@@ -140,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
@@ -158,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
@@ -169,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard LABEL/TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
@@ -187,6 +211,51 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
+}
+
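+/* Fill the VXLAN match frame (outer IPs and shifted VNI) from the flow
+ * dissector; the tunnel destination IP is also passed back via tun_dst so
+ * the caller can offload the endpoint address.
+ */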
+static void
+nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version, __be32 *tun_dst)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_keyid *vni;
+
+ /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
+ memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ u32 temp_vni;
+
+ vni = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ target);
+ temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ frame->tun_id = cpu_to_be32(temp_vni);
+ }
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ vxlan_ips =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ target);
+ frame->ip_src = vxlan_ips->src;
+ frame->ip_dst = vxlan_ips->dst;
+ *tun_dst = vxlan_ips->dst;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -194,10 +263,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ __be32 tun_dst, tun_dst_mask = 0;
+ struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
+ tun_type = NFP_FL_TUNNEL_VXLAN;
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -216,14 +291,14 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
- false);
+ false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
nfp_repr_get_port_id(netdev),
- true);
+ true, tun_type);
if (err)
return err;
@@ -291,5 +366,28 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ /* Populate Exact VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
+ flow, false, &tun_dst);
+ /* Populate Mask VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
+ flow, true, &tun_dst_mask);
+ ext += sizeof(struct nfp_flower_vxlan);
+ msk += sizeof(struct nfp_flower_vxlan);
+
+ /* Configure tunnel end point MAC. */
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ netdev_repr = netdev_priv(netdev);
+ nfp_tunnel_write_macs(netdev_repr->app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3226ddc55f99..193520ef23f0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -140,7 +140,7 @@ exit_rcu_unlock:
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
- unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_fl_stats_frame *stats_frame;
unsigned char *msg;
int i;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index a18b4d2b1d3e..6f239c27964e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,26 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
static int
nfp_flower_xmit_flow(struct net_device *netdev,
struct nfp_fl_payload *nfp_flow, u8 mtype)
@@ -117,7 +135,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
- struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -125,15 +142,58 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
+	/* If any tunnel dissector key is used then the full required set of
+	 * keys must also be present.
+	 */
+ if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ return -EOPNOTSUPP;
+
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
+ struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
+ struct flow_dissector_key_control *enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->key);
+ if (mask_enc_ctl->addr_type != 0xffff ||
+ enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
+
+ /* These fields are already verified as used. */
+ mask_ipv4 =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ flow->mask);
+ if (mask_ipv4->dst != cpu_to_be32(~0))
+ return -EOPNOTSUPP;
+
+ mask_enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->mask);
+ enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->key);
+
+ if (mask_enc_ports->dst != cpu_to_be16(~0) ||
+ enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ return -EOPNOTSUPP;
+
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_vxlan);
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -146,34 +206,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
flow->key);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
- mask_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- flow->mask);
-
- key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
-
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -184,11 +225,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
- /* Currently we do not offload MPLS. */
- case cpu_to_be16(ETH_P_MPLS_UC):
- case cpu_to_be16(ETH_P_MPLS_MC):
- return -EOPNOTSUPP;
-
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -252,6 +288,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
if (!flow_pay->action_data)
goto err_free_mask;
+ flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
@@ -361,6 +398,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_flow;
+ if (nfp_flow->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+
err = nfp_flower_xmit_flow(netdev, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
new file mode 100644
index 000000000000..c495f8f38506
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2, June 1991 as shown in the file COPYING in the top-level
+ * directory of this source tree or the BSD 2-Clause License provided below.
+ * You have the option to license this software under the complete terms of
+ * either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <net/netevent.h>
+#include <linux/idr.h>
+#include <net/dst_metadata.h>
+#include <net/arp.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_net.h"
+
+#define NFP_FL_MAX_ROUTES 32
+
+/**
+ * struct nfp_tun_active_tuns - periodic message of active tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels reported in the message
+ * @flags: options part of the request
+ * @ipv4: dest IPv4 address of active route
+ * @egress_port: port the encapsulated packet egressed
+ * @extra: reserved for future use
+ * @tun_info: tunnels that have sent traffic in the reported period
+ */
+struct nfp_tun_active_tuns {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info {
+ __be32 ipv4;
+ __be32 egress_port;
+ __be32 extra[2];
+ } tun_info[];
+};
+
+/**
+ * struct nfp_tun_neigh - neighbour/route entry on the NFP
+ * @dst_ipv4: destination IPv4 address
+ * @src_ipv4: source IPv4 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv4
+ */
+struct nfp_tun_neigh {
+ __be32 dst_ipv4;
+ __be32 src_ipv4;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
+ * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv4_addr: destination ipv4 address for route
+ * @reserved: reserved for future use
+ */
+struct nfp_tun_req_route_ipv4 {
+ __be32 ingress_port;
+ __be32 ipv4_addr;
+ __be32 reserved[2];
+};
+
+/**
+ * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
+ * @ipv4_addr: destination of route
+ * @list: list pointer
+ */
+struct nfp_ipv4_route_entry {
+ __be32 ipv4_addr;
+ struct list_head list;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX 32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv4_addr: array of NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
+ */
+struct nfp_tun_ipv4_addr {
+ __be32 count;
+ __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv4_addr_entry {
+ __be32 ipv4_addr;
+ int ref_count;
+ struct list_head list;
+};
+
+/**
+ * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
+ * @reserved: reserved for future use
+ * @count: number of MAC addresses in the message
+ * @index: index of MAC address in the lookup table
+ * @addr: interface MAC address
+ * @addresses: series of MACs to offload
+ */
+struct nfp_tun_mac_addr {
+ __be16 reserved;
+ __be16 count;
+ struct index_mac_addr {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ } addresses[];
+};
+
+/**
+ * struct nfp_tun_mac_offload_entry - list of MACs to offload
+ * @index: index of MAC address for offloading
+ * @addr: interface MAC address
+ * @list: list pointer
+ */
+struct nfp_tun_mac_offload_entry {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define NFP_MAX_MAC_INDEX 0xff
+
+/**
+ * struct nfp_tun_mac_non_nfp_idx - maps non-NFP netdev ifindex to 8-bit id
+ * @ifindex: netdev ifindex of the device
+ * @index: index of the netdev's MAC on the NFP
+ * @list: list pointer
+ */
+struct nfp_tun_mac_non_nfp_idx {
+ int ifindex;
+ u8 index;
+ struct list_head list;
+};
+
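+/* Handle the periodic active-tunnels message from the NFP: validate the
+ * payload length, then for each reported tunnel look up the neighbour on
+ * the egress representor and refresh its used timestamp.
+ */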
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_active_tuns *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ __be32 ipv4_addr;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_MAX_ROUTES) {
+ nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != sizeof(struct nfp_tun_active_tuns) +
+ sizeof(struct route_ip_info) * count) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ ipv4_addr = payload->tun_info[i].ipv4;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_repr_get(app, port);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
+
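+/* MAC offload candidates are OvS internal ports and VXLAN netdevs. */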
+static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ return true;
+
+ return false;
+}
+
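+/* Allocate a control message of the requested type and length, copy in the
+ * payload and transmit it to the NFP.
+ */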
+static int
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata)
+{
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ skb = nfp_flower_cmsg_alloc(app, plen, mtype);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ return 0;
+}
+
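+/* Check whether a destination IP already has a route cached on the NFP. */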
+static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+ return false;
+}
+
+static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+ return;
+ }
+ }
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
+ return;
+ }
+
+ entry->ipv4_addr = ipv4_addr;
+ list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+}
+
+static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+}
+
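+/* Send a neighbour entry for a tunnel destination to the NFP. An invalid
+ * neighbour is reported as the destination IP with all other fields zeroed
+ * and triggers an ARP probe; a valid one also carries the source IP, MACs
+ * and egress port, and its destination is cached as an offloaded route.
+ */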
+static void
+nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh)
+{
+ struct nfp_tun_neigh payload;
+
+ /* Only offload representor IPv4s for now. */
+ if (!nfp_netdev_is_nfp_repr(netdev))
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh));
+ payload.dst_ipv4 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID)) {
+ nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ /* Trigger ARP to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv4 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&payload);
+}
+
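+/* React to kernel neighbour updates and redirects: if the changed
+ * destination belongs to a representor and is a route already offloaded to
+ * the NFP, redo the route lookup and push the refreshed neighbour entry.
+ */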
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct netevent_redirect *redir;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct nfp_app *app;
+ struct rtable *rt;
+ int err;
+
+ switch (event) {
+ case NETEVENT_REDIRECT:
+ redir = (struct netevent_redirect *)ptr;
+ n = redir->neigh;
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = (struct neighbour *)ptr;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+ /* Only concerned with route changes for representors. */
+ if (!nfp_netdev_is_nfp_repr(n->dev))
+ return NOTIFY_DONE;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app = app_priv->app;
+
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
+#else
+ return NOTIFY_DONE;
+#endif
+
+ flow.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh(n->dev, app, &flow, n);
+
+ return NOTIFY_OK;
+}
+
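+/* Handle a route request cmsg from the NFP: resolve the requested
+ * destination via a kernel route and neighbour lookup and answer with a
+ * TUN_NEIGH message, or warn if no route is found.
+ */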
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv4 *payload;
+ struct net_device *netdev;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct rtable *rt;
+ int err;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ if (!netdev)
+ goto route_fail_warning;
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET)
+	/* Do a route lookup in the same namespace as the ingress port. */
+ rt = ip_route_output_key(dev_net(netdev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ goto route_fail_warning;
+#else
+ goto route_fail_warning;
+#endif
+
+ /* Get the neighbour entry for the lookup */
+ n = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ ip_rt_put(rt);
+ if (!n)
+ goto route_fail_warning;
+ nfp_tun_write_neigh(n->dev, app, &flow, n);
+ neigh_release(n);
+ return;
+
+route_fail_warning:
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+}
+
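+/* Push the current list of offloaded tunnel endpoint IPv4 addresses to the
+ * NFP as a single TUN_IPS message.
+ */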
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_tun_ipv4_addr payload;
+ struct list_head *ptr, *storage;
+ int count;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+ return;
+ }
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ payload.ipv4_addr[count++] = entry->ipv4_addr;
+ }
+ payload.count = cpu_to_be32(count);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+ sizeof(struct nfp_tun_ipv4_addr),
+ &payload);
+}
+
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count++;
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ return;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return;
+ }
+ entry->ipv4_addr = ipv4;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count--;
+ if (!entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
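+/* Send all queued tunnel endpoint MACs to the NFP in one TUN_MAC message;
+ * the pending list is flushed only if the write succeeds, so a failed write
+ * is retried on the next call.
+ */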
+void nfp_tunnel_write_macs(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ struct nfp_tun_mac_addr *payload;
+ struct list_head *ptr, *storage;
+ int mac_count, err, pay_size;
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ if (!priv->nfp_mac_off_count) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ pay_size = sizeof(struct nfp_tun_mac_addr) +
+ sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+
+ payload = kzalloc(pay_size, GFP_KERNEL);
+ if (!payload) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+
+ mac_count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ payload->addresses[mac_count].index = entry->index;
+ ether_addr_copy(payload->addresses[mac_count].addr,
+ entry->addr);
+ mac_count++;
+ }
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ pay_size, payload);
+
+ kfree(payload);
+
+ if (err) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ /* Write failed so retain list for future retry. */
+ return;
+ }
+
+ /* If list was successfully offloaded, flush it. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ priv->nfp_mac_off_count = 0;
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
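+/* Get the 8-bit index that represents a non-NFP netdev's MAC on the NFP,
+ * allocating a new one from the IDA on first use.
+ */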
+static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+ int idx;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ idx = entry->index;
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+ }
+
+ idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (idx < 0) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return -ENOMEM;
+ }
+ entry->ifindex = ifindex;
+ entry->index = idx;
+ list_add_tail(&entry->list, &priv->nfp_mac_index_list);
+ mutex_unlock(&priv->nfp_mac_index_lock);
+
+ return idx;
+}
+
+static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ ida_simple_remove(&priv->nfp_mac_off_ids,
+ entry->index);
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_mac_index_lock);
+}
+
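+/* Queue a netdev's MAC address for offload. Physical and PCIe representors
+ * encode their port in the index; other supported netdevs are assigned a
+ * unique 8-bit index.
+ */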
+static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
+ struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ u16 nfp_mac_idx;
+ int port = 0;
+
+ /* Check if MAC should be offloaded. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ return;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_repr_get_port_id(netdev);
+ else if (!nfp_tun_is_netdev_to_offload(netdev))
+ return;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+		nfp_flower_cmsg_warn(app, "Mem error when offloading MAC.\n");
+ return;
+ }
+
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+ } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
+ } else {
+ /* Must assign our own unique 8-bit index. */
+ int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+
+ if (idx < 0) {
+ nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
+ kfree(entry);
+ return;
+ }
+ nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ }
+
+ entry->index = cpu_to_be16(nfp_mac_idx);
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ priv->nfp_mac_off_count++;
+ list_add_tail(&entry->list, &priv->nfp_mac_off_list);
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
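+/* Netdev notifier: drop the MAC index of devices going away and queue the
+ * MACs of new or re-addressed devices, forcing a list write to keep the
+ * NFP up to date.
+ */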
+static int nfp_tun_mac_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ /* If non-nfp netdev then free its offload index. */
+ if (nfp_tun_is_netdev_to_offload(netdev))
+ nfp_tun_del_mac_idx(app, netdev->ifindex);
+ } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
+ event == NETDEV_REGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+
+ /* Force a list write to keep NFP up to date. */
+ nfp_tunnel_write_macs(app);
+ }
+ return NOTIFY_OK;
+}
+
+int nfp_tunnel_config_start(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+ int err;
+
+ /* Initialise priv data for MAC offloading. */
+ priv->nfp_mac_off_count = 0;
+ mutex_init(&priv->nfp_mac_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_off_list);
+ priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
+ mutex_init(&priv->nfp_mac_index_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_index_list);
+ ida_init(&priv->nfp_mac_off_ids);
+
+ /* Initialise priv data for IPv4 offloading. */
+ mutex_init(&priv->nfp_ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
+ /* Initialise priv data for neighbour offloading. */
+ mutex_init(&priv->nfp_neigh_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
+ priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ if (err)
+ goto err_free_mac_ida;
+
+ err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ if (err)
+ goto err_unreg_mac_nb;
+
+	/* Parse already-registered netdevs for MACs that need to be offloaded. */
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev)
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+ rtnl_unlock();
+
+ return 0;
+
+err_unreg_mac_nb:
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+err_free_mac_ida:
+ ida_destroy(&priv->nfp_mac_off_ids);
+ return err;
+}
+
+void nfp_tunnel_config_stop(struct nfp_app *app)
+{
+ struct nfp_tun_mac_offload_entry *mac_entry;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *route_entry;
+ struct nfp_tun_mac_non_nfp_idx *mac_idx;
+ struct nfp_ipv4_addr_entry *ip_entry;
+ struct list_head *ptr, *storage;
+
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
+
+ /* Free any memory that may be occupied by MAC list. */
+ mutex_lock(&priv->nfp_mac_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ mutex_unlock(&priv->nfp_mac_off_lock);
+
+ /* Free any memory that may be occupied by MAC index list. */
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
+ list);
+ list_del(&mac_idx->list);
+ kfree(mac_idx);
+ }
+ mutex_unlock(&priv->nfp_mac_index_lock);
+
+ ida_destroy(&priv->nfp_mac_off_ids);
+
+ /* Free any memory that may be occupied by ipv4 list. */
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ list_del(&ip_entry->list);
+ kfree(ip_entry);
+ }
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ /* Free any memory that may be occupied by the route list. */
+ mutex_lock(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
+ list);
+ list_del(&route_entry->list);
+ kfree(route_entry);
+ }
+ mutex_unlock(&priv->nfp_neigh_off_lock);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 82c290763529..5d9e2eba5b49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index af640b5c2108..857bb33020ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -36,6 +36,8 @@
#include <net/devlink.h>
+#include <trace/events/devlink.h>
+
#include "nfp_net_repr.h"
struct bpf_prog;
@@ -271,11 +273,17 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
+
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0,
+ skb->data, skb->len);
+
app->type->ctrl_msg_rx(app, skb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
new file mode 100644
index 000000000000..830f6de25f47
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2, June 1991 as shown in the file COPYING in the top-level
+ * directory of this source tree or the BSD 2-Clause License provided below.
+ * You have the option to license this software under the complete terms of
+ * either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "nfp_asm.h"
+
+const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ32] = { 0x00, 0x5c },
+ [CMD_TGT_READ32_LE] = { 0x01, 0x5c },
+ [CMD_TGT_READ32_SWAP] = { 0x02, 0x5c },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
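+/* Encode a software register as an unrestricted operand; invalid encodings
+ * are reported with pr_err() and encode as 0.
+ */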
+static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
+{
+ bool lm_id, lm_dec = false;
+ u16 val = swreg_value(reg);
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ switch (swreg_lm_mode(reg)) {
+ case NN_LM_MOD_NONE:
+ if (val & ~UR_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+ return UR_REG_LM | FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ val;
+ case NN_LM_MOD_DEC:
+ lm_dec = true;
+ /* fall through */
+ case NN_LM_MOD_INC:
+ if (val) {
+				pr_err("LM offset in inc/dec mode\n");
+ return 0;
+ }
+ return UR_REG_LM | UR_REG_LM_POST_MOD |
+ FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ FIELD_PREP(UR_REG_LM_POST_MOD_DEC, lm_dec);
+ default:
+ pr_err("bad LM mode for unrestricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ }
+
+ pr_err("unrecognized reg encoding %08x\n", reg);
+ return 0;
+}
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = swreg_value(reg);
+ bool lm_id;
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ if (swreg_lm_mode(reg) != NN_LM_MOD_NONE) {
+ pr_err("bad LM mode for restricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+
+ if (val & ~RE_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+
+ return RE_REG_LM | FIELD_PREP(RE_REG_LM_IDX, lm_id) | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ case NN_REG_NNR:
+ pr_err("NNRs used with restricted encoding\n");
+ return 0;
+ }
+
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+}
+
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+#define NFP_USTORE_ECC_POLY_WORDS 7
+#define NFP_USTORE_OP_BITS 45
+
+static const u64 nfp_ustore_ecc_polynomials[NFP_USTORE_ECC_POLY_WORDS] = {
+ 0x0ff800007fffULL,
+ 0x11f801ff801fULL,
+ 0x1e387e0781e1ULL,
+ 0x17cb8e388e22ULL,
+ 0x1af5b2c93244ULL,
+ 0x1f56d5525488ULL,
+ 0x0daf69a46910ULL,
+};
+
+static bool parity(u64 value)
+{
+ return hweight64(value) & 1;
+}
+
+int nfp_ustore_check_valid_no_ecc(u64 insn)
+{
+ if (insn & ~GENMASK_ULL(NFP_USTORE_OP_BITS, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 nfp_ustore_calc_ecc_insn(u64 insn)
+{
+ u8 ecc = 0;
+ int i;
+
+ for (i = 0; i < NFP_USTORE_ECC_POLY_WORDS; i++)
+ ecc |= parity(nfp_ustore_ecc_polynomials[i] & insn) << i;
+
+ return insn | (u64)ecc << NFP_USTORE_OP_BITS;
+}
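For reference, the ECC generation compiles stand-alone as well; a sketch assuming GCC/Clang's __builtin_popcountll() as a stand-in for the kernel's hweight64():

    #include <stdint.h>
    #include <stdio.h>

    #define ECC_POLY_WORDS 7
    #define OP_BITS 45

    /* Same polynomial table as nfp_ustore_ecc_polynomials above. */
    static const uint64_t poly[ECC_POLY_WORDS] = {
        0x0ff800007fffULL, 0x11f801ff801fULL, 0x1e387e0781e1ULL,
        0x17cb8e388e22ULL, 0x1af5b2c93244ULL, 0x1f56d5525488ULL,
        0x0daf69a46910ULL,
    };

    /* Append the seven parity bits to a 45-bit instruction word. */
    static uint64_t ecc_insn(uint64_t insn)
    {
        uint64_t ecc = 0;

        for (int i = 0; i < ECC_POLY_WORDS; i++)
            ecc |= (uint64_t)(__builtin_popcountll(poly[i] & insn) & 1) << i;

        return insn | ecc << OP_BITS;
    }

    int main(void)
    {
        /* e.g. a 45-bit opcode word; the ECC lands in bits 45..51 */
        printf("%#llx\n", (unsigned long long)ecc_insn(0x0f000000000ULL));
        return 0;
    }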
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index d2b535739d2b..86e7daee6099 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,6 +34,8 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
+#include <linux/bitfield.h>
+#include <linux/bug.h>
#include <linux/types.h>
#define REG_NONE 0
@@ -43,23 +45,31 @@
#define RE_REG_IMM_encode(x) \
(RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_LM 0x050
+#define RE_REG_LM_IDX 0x008
+#define RE_REG_LM_IDX_MAX 0x7
#define RE_REG_XFR 0x080
#define UR_REG_XFR 0x180
+#define UR_REG_LM 0x200
+#define UR_REG_LM_IDX 0x020
+#define UR_REG_LM_POST_MOD 0x010
+#define UR_REG_LM_POST_MOD_DEC 0x001
+#define UR_REG_LM_IDX_MAX 0xf
#define UR_REG_NN 0x280
#define UR_REG_NO_DST 0x300
#define UR_REG_IMM UR_REG_NO_DST
#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
#define UR_REG_IMM_MAX 0x0ffULL
-#define OP_BR_BASE 0x0d800000020ULL
-#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
-#define OP_BR_MASK 0x0000000001fULL
-#define OP_BR_EV_PIP 0x00000000300ULL
-#define OP_BR_CSS 0x0000003c000ULL
-#define OP_BR_DEFBR 0x00000300000ULL
-#define OP_BR_ADDR_LO 0x007ffc00000ULL
-#define OP_BR_ADDR_HI 0x10000000000ULL
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
#define nfp_is_br(_insn) \
(((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
@@ -82,30 +92,33 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
-#define OP_BBYTE_BASE 0x0c800000000ULL
-#define OP_BB_A_SRC 0x000000000ffULL
-#define OP_BB_BYTE 0x00000000300ULL
-#define OP_BB_B_SRC 0x0000003fc00ULL
-#define OP_BB_I8 0x00000040000ULL
-#define OP_BB_EQ 0x00000080000ULL
-#define OP_BB_DEFBR 0x00000300000ULL
-#define OP_BB_ADDR_LO 0x007ffc00000ULL
-#define OP_BB_ADDR_HI 0x10000000000ULL
-
-#define OP_BALU_BASE 0x0e800000000ULL
-#define OP_BA_A_SRC 0x000000003ffULL
-#define OP_BA_B_SRC 0x000000ffc00ULL
-#define OP_BA_DEFBR 0x00000300000ULL
-#define OP_BA_ADDR_HI 0x0007fc00000ULL
-
-#define OP_IMMED_A_SRC 0x000000003ffULL
-#define OP_IMMED_B_SRC 0x000000ffc00ULL
-#define OP_IMMED_IMM 0x0000ff00000ULL
-#define OP_IMMED_WIDTH 0x00060000000ULL
-#define OP_IMMED_INV 0x00080000000ULL
-#define OP_IMMED_SHIFT 0x00600000000ULL
-#define OP_IMMED_BASE 0x0f000000000ULL
-#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+#define OP_BB_SRC_LMEXTN 0x40000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_IMMED_SRC_LMEXTN 0x40000000000ULL
+#define OP_IMMED_DST_LMEXTN 0x80000000000ULL
enum immed_width {
IMMED_WIDTH_ALL = 0,
@@ -119,17 +132,19 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
-#define OP_SHF_BASE 0x08000000000ULL
-#define OP_SHF_A_SRC 0x000000000ffULL
-#define OP_SHF_SC 0x00000000300ULL
-#define OP_SHF_B_SRC 0x0000003fc00ULL
-#define OP_SHF_I8 0x00000040000ULL
-#define OP_SHF_SW 0x00000080000ULL
-#define OP_SHF_DST 0x0000ff00000ULL
-#define OP_SHF_SHIFT 0x001f0000000ULL
-#define OP_SHF_OP 0x00e00000000ULL
-#define OP_SHF_DST_AB 0x01000000000ULL
-#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_SRC_LMEXTN 0x40000000000ULL
+#define OP_SHF_DST_LMEXTN 0x80000000000ULL
enum shf_op {
SHF_OP_NONE = 0,
@@ -139,19 +154,22 @@ enum shf_op {
enum shf_sc {
SHF_SC_R_ROT = 0,
+ SHF_SC_NONE = SHF_SC_R_ROT,
SHF_SC_R_SHF = 1,
SHF_SC_L_SHF = 2,
SHF_SC_R_DSHF = 3,
};
-#define OP_ALU_A_SRC 0x000000003ffULL
-#define OP_ALU_B_SRC 0x000000ffc00ULL
-#define OP_ALU_DST 0x0003ff00000ULL
-#define OP_ALU_SW 0x00040000000ULL
-#define OP_ALU_OP 0x00f80000000ULL
-#define OP_ALU_DST_AB 0x01000000000ULL
-#define OP_ALU_BASE 0x0a000000000ULL
-#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
ALU_OP_NONE = 0x00,
@@ -170,26 +188,28 @@ enum alu_dst_ab {
ALU_DST_B = 1,
};
-#define OP_LDF_BASE 0x0c000000000ULL
-#define OP_LDF_A_SRC 0x000000000ffULL
-#define OP_LDF_SC 0x00000000300ULL
-#define OP_LDF_B_SRC 0x0000003fc00ULL
-#define OP_LDF_I8 0x00000040000ULL
-#define OP_LDF_SW 0x00000080000ULL
-#define OP_LDF_ZF 0x00000100000ULL
-#define OP_LDF_BMASK 0x0000f000000ULL
-#define OP_LDF_SHF 0x001f0000000ULL
-#define OP_LDF_WR_AB 0x20000000000ULL
-
-#define OP_CMD_A_SRC 0x000000000ffULL
-#define OP_CMD_CTX 0x00000000300ULL
-#define OP_CMD_B_SRC 0x0000003fc00ULL
-#define OP_CMD_TOKEN 0x000000c0000ULL
-#define OP_CMD_XFER 0x00001f00000ULL
-#define OP_CMD_CNT 0x0000e000000ULL
-#define OP_CMD_SIG 0x000f0000000ULL
-#define OP_CMD_TGT_CMD 0x07f00000000ULL
-#define OP_CMD_MODE 0x1c0000000000ULL
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+#define OP_LDF_SRC_LMEXTN 0x40000000000ULL
+#define OP_LDF_DST_LMEXTN 0x80000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
u8 token;
@@ -198,12 +218,17 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
- CMD_TGT_WRITE8,
+ CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_READ32,
+ CMD_TGT_READ32_LE,
+ CMD_TGT_READ32_SWAP,
CMD_TGT_READ_LE,
CMD_TGT_READ_SWAP_LE,
__CMD_TGT_MAP_SIZE,
};
+extern const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE];
+
enum cmd_mode {
CMD_MODE_40b_AB = 0,
CMD_MODE_40b_BA = 1,
@@ -215,11 +240,13 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
-#define OP_LCSR_BASE 0x0fc00000000ULL
-#define OP_LCSR_A_SRC 0x000000003ffULL
-#define OP_LCSR_B_SRC 0x000000ffc00ULL
-#define OP_LCSR_WRITE 0x00000200000ULL
-#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_SRC_LMEXTN 0x40000000000ULL
+#define OP_LCSR_DST_LMEXTN 0x80000000000ULL
enum lcsr_wr_src {
LCSR_WR_AREG,
@@ -227,7 +254,122 @@ enum lcsr_wr_src {
LCSR_WR_IMM,
};
-#define OP_CARB_BASE 0x0e000000000ULL
-#define OP_CARB_OR 0x00000010000ULL
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+/* Software register representation, independent of operand type */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_LM_IDX GENMASK(23, 22)
+#define NN_REG_LM_IDX_HI BIT(23)
+#define NN_REG_LM_IDX_LO BIT(22)
+#define NN_REG_LM_MOD GENMASK(21, 20)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_GPR_BOTH = NN_REG_GPR_A | NN_REG_GPR_B,
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+ NN_REG_LMEM = BIT(6),
+};
+
+enum nfp_bpf_lm_mode {
+ NN_LM_MOD_NONE = 0,
+ NN_LM_MOD_INC,
+ NN_LM_MOD_DEC,
+};
+
+#define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH)
+#define reg_a(x) __enc_swreg((x), NN_REG_GPR_A)
+#define reg_b(x) __enc_swreg((x), NN_REG_GPR_B)
+#define reg_nnr(x) __enc_swreg((x), NN_REG_NNR)
+#define reg_xfer(x) __enc_swreg((x), NN_REG_XFER)
+#define reg_imm(x) __enc_swreg((x), NN_REG_IMM)
+#define reg_none() __enc_swreg(0, NN_REG_NONE)
+#define reg_lm(x, off) __enc_swreg_lm((x), NN_LM_MOD_NONE, (off))
+#define reg_lm_inc(x) __enc_swreg_lm((x), NN_LM_MOD_INC, 0)
+#define reg_lm_dec(x) __enc_swreg_lm((x), NN_LM_MOD_DEC, 0)
+#define __reg_lm(x, mod, off) __enc_swreg_lm((x), (mod), (off))
+
+typedef __u32 __bitwise swreg;
+
+static inline swreg __enc_swreg(u16 id, u8 type)
+{
+ return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type));
+}
+
+static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off)
+{
+ WARN_ON(id > 3 || (off && mode != NN_LM_MOD_NONE));
+
+ return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) |
+ FIELD_PREP(NN_REG_LM_IDX, id) |
+ FIELD_PREP(NN_REG_LM_MOD, mode) |
+ off);
+}
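A local-memory reference thus packs into the 32-bit swreg as: type in bits 31..24, the 2-bit LM index in 23..22, the post-modify mode in 21..20 and the offset in the low byte. Only the low index bit fits in the final operand field; the high bit is carried separately as LMEXTN, which is why swreg_lm_idx() and swreg_lmextn() below split the field. A hypothetical encoder with the masks written out instead of FIELD_PREP():

    #include <stdint.h>
    #include <stdio.h>

    #define TYPE_LMEM 0x40u   /* NN_REG_LMEM = BIT(6) */

    static uint32_t enc_lm(uint8_t idx, uint8_t mode, uint8_t off)
    {
        return (TYPE_LMEM << 24) |              /* NN_REG_TYPE */
               ((uint32_t)(idx & 3) << 22) |    /* NN_REG_LM_IDX */
               ((uint32_t)(mode & 3) << 20) |   /* NN_REG_LM_MOD */
               off;                             /* NN_REG_VAL */
    }

    int main(void)
    {
        uint32_t r = enc_lm(2, 0, 5);   /* LM index 2, no post-mod, offset 5 */

        printf("lm_idx_lo=%u lmextn=%u\n",
               (r >> 22) & 1, (r >> 23) & 1);   /* 0 and 1 for index 2 */
        return 0;
    }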
+
+static inline u32 swreg_raw(swreg reg)
+{
+ return (__force u32)reg;
+}
+
+static inline enum nfp_bpf_reg_type swreg_type(swreg reg)
+{
+ return FIELD_GET(NN_REG_TYPE, swreg_raw(reg));
+}
+
+static inline u16 swreg_value(swreg reg)
+{
+ return FIELD_GET(NN_REG_VAL, swreg_raw(reg));
+}
+
+static inline bool swreg_lm_idx(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_LO, swreg_raw(reg));
+}
+
+static inline bool swreg_lmextn(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_HI, swreg_raw(reg));
+}
+
+static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_MOD, swreg_raw(reg));
+}
+
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg);
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8);
+
+#define NFP_USTORE_PREFETCH_WINDOW 8
+
+int nfp_ustore_check_valid_no_ecc(u64 insn);
+u64 nfp_ustore_calc_ecc_insn(u64 insn);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1c0187f0af51..d2f73feb8497 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1574,26 +1574,6 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
- unsigned int *off, unsigned int *len)
-{
- struct xdp_buff xdp;
- void *orig_data;
- int ret;
-
- xdp.data_hard_start = hard_start;
- xdp.data = data + *off;
- xdp.data_end = data + *off + *len;
-
- orig_data = xdp.data;
- ret = bpf_prog_run_xdp(prog, &xdp);
-
- *len -= xdp.data - orig_data;
- *off += xdp.data - orig_data;
-
- return ret;
-}
-
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1629,6 +1609,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_meta_parsed meta;
struct net_device *netdev;
dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
void *new_frag;
idx = D_IDX(rx_ring, rx_ring->rd_p);
@@ -1707,16 +1688,24 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp) && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- void *hard_start;
+ struct xdp_buff xdp;
int act;
- hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data = orig_data;
+ xdp.data_meta = orig_data;
+ xdp.data_end = orig_data + pkt_len;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len -= xdp.data - orig_data;
+ pkt_off += xdp.data - orig_data;
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
- &pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
break;
case XDP_TX:
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
@@ -1784,6 +1773,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index b0a452ba9039..782d452e0fc2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -255,7 +255,7 @@
* @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 1
+#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082