author		Al Viro <viro@zeniv.linux.org.uk>	2024-07-31 11:49:04 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2024-10-07 13:34:41 -0400
commit		8fd3395ec9051a52828fcca2328cb50a69dea8ef (patch)
tree		d68b2a86f57af48fbece1070a3d3a09fc16a7521 /kernel/bpf/task_iter.c
parent		8cf0b93919e13d1e8d4466eb4080a4c4d9d66d7b (diff)
get rid of ...lookup...fdget_rcu() family
Once upon a time, predecessors of those used to do file lookup without
bumping a refcount, provided that caller held rcu_read_lock() across the
lookup and whatever it wanted to read from the struct file found. When
struct file allocation switched to SLAB_TYPESAFE_BY_RCU, that stopped
being feasible and these primitives started to bump the file refcount
for lookup result, requiring the caller to call fput() afterwards.

But that turned them pointless - e.g.
	rcu_read_lock();
	file = lookup_fdget_rcu(fd);
	rcu_read_unlock();
is equivalent to
	file = fget_raw(fd);
and all callers of lookup_fdget_rcu() are of that form.

Similarly, task_lookup_fdget_rcu() calls can be replaced with calling
fget_task().

task_lookup_next_fdget_rcu() doesn't have direct counterparts, but its
callers would be happier if we replaced it with an analogue that deals
with RCU internally.

Reviewed-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
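For reference, a sketch of the before/after calling conventions described above (not part of the patch; old_way(), new_way() and walk_task_files() are hypothetical wrappers, and header choices are approximate -- only lookup_fdget_rcu(), fget_raw(), fget_task_next() and fput() are the kernel helpers the message refers to):

	/*
	 * Sketch only, not code from this patch.  old_way(), new_way() and
	 * walk_task_files() are hypothetical; header locations approximate.
	 */
	#include <linux/file.h>
	#include <linux/fdtable.h>	/* old home of lookup_fdget_rcu() */
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	/* Old pattern: the RCU bracketing buys nothing, because the lookup
	 * already returns a counted reference the caller must drop.
	 */
	static struct file *old_way(unsigned int fd)
	{
		struct file *file;

		rcu_read_lock();
		file = lookup_fdget_rcu(fd);
		rcu_read_unlock();
		return file;			/* caller owes an fput() */
	}

	/* New pattern: same semantics, a single call. */
	static struct file *new_way(unsigned int fd)
	{
		return fget_raw(fd);		/* caller owes an fput() */
	}

	/* Scanning another task's descriptors with fget_task_next(), which
	 * handles RCU internally and updates *fd to the slot it found.
	 */
	static void walk_task_files(struct task_struct *task)
	{
		unsigned int fd = 0;
		struct file *file;

		while ((file = fget_task_next(task, &fd))) {
			/* ... inspect the file ... */
			fput(file);
			fd++;	/* resume after the descriptor just returned */
		}
	}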
Diffstat (limited to 'kernel/bpf/task_iter.c')
-rw-r--r--	kernel/bpf/task_iter.c	6
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 02aa9db8d796..7fe602ca74a0 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -5,7 +5,6 @@
 #include <linux/namei.h>
 #include <linux/pid_namespace.h>
 #include <linux/fs.h>
-#include <linux/fdtable.h>
 #include <linux/filter.h>
 #include <linux/bpf_mem_alloc.h>
 #include <linux/btf_ids.h>
@@ -286,17 +285,14 @@ again:
 			curr_fd = 0;
 	}
 
-	rcu_read_lock();
-	f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
+	f = fget_task_next(curr_task, &curr_fd);
 	if (f) {
 		/* set info->fd */
 		info->fd = curr_fd;
-		rcu_read_unlock();
 		return f;
 	}
 
 	/* the current task is done, go to the next task */
-	rcu_read_unlock();
 	put_task_struct(curr_task);
 
 	if (info->common.type == BPF_TASK_ITER_TID) {