author		Tushar Vyavahare <tushar.vyavahare@intel.com>	2023-12-14 13:00:07 +0000
committer	Daniel Borkmann <daniel@iogearbox.net>	2023-12-14 16:11:13 +0100
commit		2e1d6a04116c373fbd25beddba4267178535bc60 (patch)
tree		66ed3a51fb09661b55276001751737d0424961a8 /tools/testing/selftests/bpf/xskxceiver.c
parent		c838fe1282df540ebf6e24e386ac34acb3ef3115 (diff)
selftests/xsk: Fix for SEND_RECEIVE_UNALIGNED test
Fix the test broken by the shared umem test and framework enhancement commit.

Correct the implementation of pkt_stream_replace_half() so that nb_valid_entries is not unconditionally halved, as that assumption does not hold for all tests. Ensure that the expected valid_entries value for the SEND_RECEIVE_UNALIGNED test equals the total number of packets sent, which is 4096.

Create a new function, pkt_stream_pkt_set(), that allows packets to be modified as a test requires while keeping the valid packet count accurate, preventing inconsistencies in packet tracking.

Fixes: 6d198a89c004 ("selftests/xsk: Add a test for shared umem feature")
Reported-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20231214130007.33281-1-tushar.vyavahare@intel.com
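For illustration, a minimal standalone sketch of the delta-based accounting idea the patch introduces. The struct layouts, the MAX_ETH_JUMBO_SIZE value, and the main() driver below are simplified stand-ins for this sketch, not the selftest's actual definitions:

/*
 * Sketch of delta-based valid-entry accounting (assumed simplified
 * types; only set_pkt_valid()/pkt_stream_pkt_set() mirror the patch).
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ETH_JUMBO_SIZE 9000	/* illustrative value, not the real header's */

struct pkt {
	int offset;
	unsigned int len;
	bool valid;
};

struct pkt_stream {
	unsigned int nb_valid_entries;
};

static bool set_pkt_valid(int offset, unsigned int len)
{
	return len <= MAX_ETH_JUMBO_SIZE;
}

/*
 * Adjust the stream counter by the difference between the packet's old
 * and new validity, so modifying the same packet repeatedly never
 * double-counts it.
 */
static void pkt_stream_pkt_set(struct pkt_stream *ps, struct pkt *pkt,
			       int offset, unsigned int len)
{
	bool prev_pkt_valid = pkt->valid;

	pkt->offset = offset;
	pkt->len = len;
	pkt->valid = set_pkt_valid(offset, len);
	ps->nb_valid_entries += pkt->valid - prev_pkt_valid;
}

int main(void)
{
	struct pkt_stream ps = { .nb_valid_entries = 0 };
	struct pkt pkt = { 0 };

	pkt_stream_pkt_set(&ps, &pkt, 0, 64);	 /* invalid -> valid: +1 */
	pkt_stream_pkt_set(&ps, &pkt, 0, 128);	 /* valid -> valid:   +0 */
	pkt_stream_pkt_set(&ps, &pkt, 0, 10000); /* valid -> invalid: -1 */
	printf("valid entries: %u\n", ps.nb_valid_entries);	/* prints 0 */
	return 0;
}

Because the update uses the difference between old and new validity, calling the function again on an already-counted packet (as __pkt_stream_replace_half() does when it rewrites every other packet) leaves nb_valid_entries correct, which is why the unconditional "nb_valid_entries /= 2" in the diff below can be dropped.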
Diffstat (limited to 'tools/testing/selftests/bpf/xskxceiver.c')
-rw-r--r--	tools/testing/selftests/bpf/xskxceiver.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index b604c570309a..b1102ee13faa 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -634,16 +634,24 @@ static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pk
 	return nb_frags;
 }
 
+static bool set_pkt_valid(int offset, u32 len)
+{
+	return len <= MAX_ETH_JUMBO_SIZE;
+}
+
 static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
 {
 	pkt->offset = offset;
 	pkt->len = len;
-	if (len > MAX_ETH_JUMBO_SIZE) {
-		pkt->valid = false;
-	} else {
-		pkt->valid = true;
-		pkt_stream->nb_valid_entries++;
-	}
+	pkt->valid = set_pkt_valid(offset, len);
+}
+
+static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
+{
+	bool prev_pkt_valid = pkt->valid;
+
+	pkt_set(pkt_stream, pkt, offset, len);
+	pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
 }
 
 static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
@@ -665,7 +673,7 @@ static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb
 	for (i = 0; i < nb_pkts; i++) {
 		struct pkt *pkt = &pkt_stream->pkts[i];
 
-		pkt_set(pkt_stream, pkt, 0, pkt_len);
+		pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
 		pkt->pkt_nb = nb_start + i * nb_off;
 	}
 
@@ -700,10 +708,9 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
 
 	pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
 	for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
-		pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
+		pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
 
 	ifobj->xsk->pkt_stream = pkt_stream;
-	pkt_stream->nb_valid_entries /= 2;
 }
 
 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)