Diffstat (limited to 'lib')
-rw-r--r--  lib/idr.c          66
-rw-r--r--  lib/nlattr.c       54
-rw-r--r--  lib/radix-tree.c    6
-rw-r--r--  lib/test_bpf.c    364
4 files changed, 458 insertions, 32 deletions
diff --git a/lib/idr.c b/lib/idr.c
index b13682bb0a1c..082778cf883e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -7,45 +7,32 @@
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);
-/**
- * idr_alloc - allocate an id
- * @idr: idr handle
- * @ptr: pointer to be associated with the new id
- * @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive)
- * @gfp: memory allocation flags
- *
- * Allocates an unused ID in the range [start, end). Returns -ENOSPC
- * if there are no unused IDs in that range.
- *
- * Note that @end is treated as max when <= 0. This is to always allow
- * using @start + N as @end as long as N is inside integer range.
- *
- * Simultaneous modifications to the @idr are not allowed and should be
- * prevented by the user, usually with a lock. idr_alloc() may be called
- * concurrently with read-only accesses to the @idr, such as idr_find() and
- * idr_for_each_entry().
- */
-int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
+int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
+ unsigned long start, unsigned long end, gfp_t gfp,
+ bool ext)
{
- void __rcu **slot;
struct radix_tree_iter iter;
+ void __rcu **slot;
- if (WARN_ON_ONCE(start < 0))
- return -EINVAL;
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return -EINVAL;
radix_tree_iter_init(&iter, start);
- slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
+ if (ext)
+ slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
+ else
+ slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
if (IS_ERR(slot))
return PTR_ERR(slot);
radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
- return iter.index;
+
+ if (index)
+ *index = iter.index;
+ return 0;
}
-EXPORT_SYMBOL_GPL(idr_alloc);
+EXPORT_SYMBOL_GPL(idr_alloc_cmn);
/**
* idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
@@ -134,6 +121,20 @@ void *idr_get_next(struct idr *idr, int *nextid)
}
EXPORT_SYMBOL(idr_get_next);
+void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
+ if (!slot)
+ return NULL;
+
+ *nextid = iter.index;
+ return rcu_dereference_raw(*slot);
+}
+EXPORT_SYMBOL(idr_get_next_ext);
+
/**
* idr_replace - replace pointer for given id
* @idr: idr handle
@@ -150,12 +151,19 @@ EXPORT_SYMBOL(idr_get_next);
*/
void *idr_replace(struct idr *idr, void *ptr, int id)
{
+ if (WARN_ON_ONCE(id < 0))
+ return ERR_PTR(-EINVAL);
+
+ return idr_replace_ext(idr, ptr, id);
+}
+EXPORT_SYMBOL(idr_replace);
+
+void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
+{
struct radix_tree_node *node;
void __rcu **slot = NULL;
void *entry;
- if (WARN_ON_ONCE(id < 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return ERR_PTR(-EINVAL);
@@ -167,7 +175,7 @@ void *idr_replace(struct idr *idr, void *ptr, int id)
return entry;
}
-EXPORT_SYMBOL(idr_replace);
+EXPORT_SYMBOL(idr_replace_ext);
/**
* DOC: IDA description
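
The hunk above folds the old idr_alloc() body into idr_alloc_cmn(), which reports the new ID through an out-parameter so that an extended, unsigned-long-range variant can share the same code path. The caller-facing idr_alloc() is not shown in this diff; as a minimal sketch, assuming it becomes a header-side inline wrapper (signature and placement are assumptions, not part of the patch), it could look like:

static inline int idr_alloc(struct idr *idr, void *ptr,
			    int start, int end, gfp_t gfp)
{
	unsigned long id;
	int ret;

	/* Old kernel-doc semantics: IDs come from [start, end), and
	 * end <= 0 means "no upper bound within int range". */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
	if (ret)
		return ret;

	return id;
}

Note that the start < 0 check has to move out of the common helper: the extended variant takes unsigned long bounds, so a negative start can no longer be seen there.
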
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fb52435be42d..927c2f19f119 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -27,6 +27,30 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S64] = sizeof(s64),
};
+static int validate_nla_bitfield32(const struct nlattr *nla,
+ u32 *valid_flags_allowed)
+{
+ const struct nla_bitfield32 *bf = nla_data(nla);
+ u32 *valid_flags_mask = valid_flags_allowed;
+
+ if (!valid_flags_allowed)
+ return -EINVAL;
+
+ /* disallow invalid bit selector */
+ if (bf->selector & ~*valid_flags_mask)
+ return -EINVAL;
+
+ /* disallow invalid bit values */
+ if (bf->value & ~*valid_flags_mask)
+ return -EINVAL;
+
+ /* disallow valid bit values that are not selected */
+ if (bf->value & ~bf->selector)
+ return -EINVAL;
+
+ return 0;
+}
+
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy)
{
@@ -46,6 +70,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
return -ERANGE;
break;
+ case NLA_BITFIELD32:
+ if (attrlen != sizeof(struct nla_bitfield32))
+ return -ERANGE;
+
+ return validate_nla_bitfield32(nla, pt->validation_data);
+
case NLA_NUL_STRING:
if (pt->len)
minlen = min_t(int, attrlen, pt->len + 1);
@@ -272,6 +302,30 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
EXPORT_SYMBOL(nla_strlcpy);
/**
+ * nla_strdup - Copy string attribute payload into a newly allocated buffer
+ * @nla: attribute to copy the string from
+ * @flags: the type of memory to allocate (see kmalloc).
+ *
+ * Returns a pointer to the allocated buffer or NULL on error.
+ */
+char *nla_strdup(const struct nlattr *nla, gfp_t flags)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla), *dst;
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ dst = kmalloc(srclen + 1, flags);
+ if (dst != NULL) {
+ memcpy(dst, src, srclen);
+ dst[srclen] = '\0';
+ }
+ return dst;
+}
+EXPORT_SYMBOL(nla_strdup);
+
+/**
* nla_memcpy - Copy a netlink attribute into another memory area
* @dest: where to copy to memcpy
* @src: netlink attribute to copy from
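
Two of the additions above deserve a usage note. validate_nla_bitfield32() expects the policy entry for an NLA_BITFIELD32 attribute to carry the set of permitted bits in ->validation_data; it then rejects selectors or values outside that set, and values whose bits are not also selected. A sketch of how a policy might wire this up (the attribute index and flag bits below are hypothetical, chosen only for illustration):

static u32 sample_flags_allowed = 0x7;	/* hypothetical: bits 0-2 valid */

static const struct nla_policy sample_policy[SAMPLE_ATTR_MAX + 1] = {
	[SAMPLE_ATTR_FLAGS] = { .type		 = NLA_BITFIELD32,
				.validation_data = &sample_flags_allowed },
};

nla_strdup() complements nla_strlcpy() for callers that want a heap copy sized to the attribute; the result must later be released with kfree(). A hypothetical caller:

	char *name = nla_strdup(attrs[SAMPLE_ATTR_NAME], GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	/* ... use name, then kfree(name) when done ... */
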
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3527eb364964..9717e2a50374 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2138,13 +2138,13 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
}
EXPORT_SYMBOL(ida_pre_get);
-void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter, gfp_t gfp, int end)
+void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp,
+ unsigned long max)
{
struct radix_tree_node *node = NULL, *child;
void __rcu **slot = (void __rcu **)&root->rnode;
unsigned long maxindex, start = iter->next_index;
- unsigned long max = end > 0 ? end - 1 : INT_MAX;
unsigned int shift, offset = 0;
grow:
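
With the end-to-max conversion deleted from the common code, the old int-based idr_get_free() entry point presumably becomes a thin wrapper that performs the same mapping the removed line used to do. A sketch, assuming a header-side inline (not part of this diff):

static inline void __rcu **idr_get_free(struct radix_tree_root *root,
					struct radix_tree_iter *iter,
					gfp_t gfp, int end)
{
	/* Same mapping the removed line performed in the common code. */
	unsigned long max = end > 0 ? end - 1 : INT_MAX;

	return idr_get_free_cmn(root, iter, gfp, max);
}
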
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index d9d5a410955c..aa8812ae6776 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -952,6 +952,32 @@ static struct bpf_test tests[] = {
{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
},
{
+ "JGE (jt 0), test 1",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, 1 } },
+ },
+ {
+ "JGE (jt 0), test 2",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 5, 3, 3 },
+ { { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+ },
+ {
"JGE",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
@@ -4492,6 +4518,35 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLT | BPF_K */
+ {
+ "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGT | BPF_K */
{
"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
@@ -4521,6 +4576,73 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLE | BPF_K */
+ {
+ "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGE | BPF_K */
{
"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
@@ -4617,6 +4739,35 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLT | BPF_K */
+ {
+ "JMP_JLT_K: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_K */
{
"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4632,6 +4783,21 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLE | BPF_K */
+ {
+ "JMP_JLE_K: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
{
"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
@@ -4662,6 +4828,36 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLT | BPF_K jump backwards */
+ {
+ "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
+ .u.insns_int = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+ BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
+ BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_K: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_K */
{
"JMP_JNE_K: if (3 != 2) return 1",
@@ -4752,6 +4948,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLT | BPF_X */
+ {
+ "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSGE | BPF_X */
{
"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
@@ -4783,6 +5010,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JSLE | BPF_X */
+ {
+ "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGT | BPF_X */
{
"JMP_JGT_X: if (3 > 2) return 1",
@@ -4814,6 +5072,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLT | BPF_X */
+ {
+ "JMP_JLT_X: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_X */
{
"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4845,6 +5134,37 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP | BPF_JLE | BPF_X */
+ {
+ "JMP_JLE_X: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
{
/* Mainly testing JIT + imm64 here. */
"JMP_JGE_X: ldimm64 test 1",
@@ -4890,6 +5210,50 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JLE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",