author      Jordan Rife <jordan@jrife.io>                 2025-07-14 11:09:08 -0700
committer   Martin KaFai Lau <martin.lau@kernel.org>      2025-07-14 12:09:09 -0700
commit      efeb820951ebf3778830256496ff72d00d135310
tree        b34189d9a124289cb571db3d17a82167fb794109      /net/ipv4/tcp_ipv4.c
parent      e25ab9b874a4bd8c6e3e5ce66cbe8a1dd4096e2e
bpf: tcp: Use bpf_tcp_iter_batch_item for bpf_tcp_iter_state batch items
Prepare for the next patch that tracks cookies between iterations by
converting struct sock **batch to union bpf_tcp_iter_batch_item *batch
inside struct bpf_tcp_iter_state.
Signed-off-by: Jordan Rife <jordan@jrife.io>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
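As context for the conversion: with a single-member union, every existing access simply gains a .sk selector while the batch array's memory layout is unchanged. A minimal sketch of the shape this enables is below; the widened member in the comment is an assumption based on the stated follow-up that "tracks cookies between iterations", not part of this commit:

struct sock;

/* One member today, so iter->batch[i].sk is layout-compatible with
 * the old struct sock **batch element access.
 */
union bpf_tcp_iter_batch_item {
	struct sock *sk;
	/* A later patch can widen each batch item without touching
	 * the batching logic, e.g. (hypothetical shape):
	 *	__u64 cookie;
	 */
};

Because a union is only as large as its largest member, sizeof(*batch) stays pointer-sized for now, so the sizing math in bpf_iter_tcp_realloc_batch() needs no adjustment in this patch.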
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--   net/ipv4/tcp_ipv4.c   24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c88b537109f..28062e292d8b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3014,12 +3014,16 @@ out:
 }
 
 #ifdef CONFIG_BPF_SYSCALL
+union bpf_tcp_iter_batch_item {
+	struct sock *sk;
+};
+
 struct bpf_tcp_iter_state {
 	struct tcp_iter_state state;
 	unsigned int cur_sk;
 	unsigned int end_sk;
 	unsigned int max_sk;
-	struct sock **batch;
+	union bpf_tcp_iter_batch_item *batch;
 };
 
 struct bpf_iter__tcp {
@@ -3045,13 +3049,13 @@ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
 	unsigned int cur_sk = iter->cur_sk;
 
 	while (cur_sk < iter->end_sk)
-		sock_gen_put(iter->batch[cur_sk++]);
+		sock_gen_put(iter->batch[cur_sk++].sk);
 }
 
 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
				      unsigned int new_batch_sz, gfp_t flags)
 {
-	struct sock **new_batch;
+	union bpf_tcp_iter_batch_item *new_batch;
 
 	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     flags | __GFP_NOWARN);
@@ -3075,7 +3079,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
@@ -3083,7 +3087,7 @@ static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
		if (seq_sk_match(seq, sk)) {
			if (iter->end_sk < iter->max_sk) {
				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
			} else if (!*start_sk) {
				/* Remember where we left off. */
				*start_sk = sk;
@@ -3104,7 +3108,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
 	struct sock *sk;
 
 	sock_hold(*start_sk);
-	iter->batch[iter->end_sk++] = *start_sk;
+	iter->batch[iter->end_sk++].sk = *start_sk;
 
 	sk = sk_nulls_next(*start_sk);
 	*start_sk = NULL;
@@ -3112,7 +3116,7 @@ static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
		if (seq_sk_match(seq, sk)) {
			if (iter->end_sk < iter->max_sk) {
				sock_hold(sk);
-				iter->batch[iter->end_sk++] = sk;
+				iter->batch[iter->end_sk++].sk = sk;
			} else if (!*start_sk) {
				/* Remember where we left off. */
				*start_sk = sk;
@@ -3216,7 +3220,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
	WARN_ON_ONCE(iter->end_sk != expected);
 done:
	bpf_iter_tcp_unlock_bucket(seq);
-	return iter->batch[0];
+	return iter->batch[0].sk;
 }
 
 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3251,11 +3255,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
		 * st->bucket. See tcp_seek_last_pos().
		 */
		st->offset++;
-		sock_gen_put(iter->batch[iter->cur_sk++]);
+		sock_gen_put(iter->batch[iter->cur_sk++].sk);
	}
 
	if (iter->cur_sk < iter->end_sk)
-		sk = iter->batch[iter->cur_sk];
+		sk = iter->batch[iter->cur_sk].sk;
	else
		sk = bpf_iter_tcp_batch(seq);