author    Jann Horn <jannh@google.com>  2022-10-31 18:52:56 +0100
committer Al Viro <viro@zeniv.linux.org.uk>  2022-10-31 15:30:11 -0400
commit    7ee47dcfff1835ff75a794d1075b6b5f5462cfed (patch)
tree      42d8767ff75c33525b7bc0ba0a05e112b7958200 /fs/file.c
parent    9abf2313adc1ca1b6180c508c25f22f9395cc780 (diff)
fs: use acquire ordering in __fget_light()
We must prevent the CPU from reordering the files->count read with the
FD table access like this, on architectures where read-read reordering
is possible:

    files_lookup_fd_raw()
                                  close_fd()
                                  put_files_struct()
    atomic_read(&files->count)

I would like to mark this for stable, but the stable rules explicitly
say "no theoretical races", and given that the FD table pointer and
files->count are explicitly stored in the same cacheline, this sort of
reordering seems quite unlikely in practice...

Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
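[Editor's note] To make the hazard concrete, here is a minimal userspace
analogue using C11 atomics rather than the kernel's atomic_t API. The
struct and the function names (close_and_put, lookup_if_sole_owner) are
invented for illustration and do not exist in the kernel; this is a
sketch of the ordering argument, not the kernel code path:

	#include <stdatomic.h>
	#include <stdio.h>

	struct table {
		void *_Atomic slots[8];	/* stands in for the FD table    */
		atomic_int count;	/* stands in for files->count    */
	};

	/* "Thread B": clear the slot, then drop the reference. */
	static void close_and_put(struct table *t, unsigned int idx)
	{
		atomic_store_explicit(&t->slots[idx], NULL,
				      memory_order_relaxed);
		/* Release decrement: the analogue of atomic_dec_and_test()
		 * in put_files_struct(); pairs with the acquire load below. */
		atomic_fetch_sub_explicit(&t->count, 1, memory_order_release);
	}

	/* "Thread A": skip refcounting only if provably the sole owner. */
	static void *lookup_if_sole_owner(struct table *t, unsigned int idx)
	{
		/* If this acquire load observes count == 1 (i.e. the release
		 * decrement above), it also observes the NULL slot store made
		 * before it. A plain/relaxed load could instead see the new
		 * count combined with the stale slot - the race in question. */
		if (atomic_load_explicit(&t->count, memory_order_acquire) == 1)
			return atomic_load_explicit(&t->slots[idx],
						    memory_order_relaxed);
		return NULL;	/* not sole owner: use a refcounted path */
	}

	int main(void)
	{
		static struct table t;
		int dummy = 42;

		atomic_init(&t.count, 2);
		atomic_init(&t.slots[3], &dummy);
		close_and_put(&t, 3);	/* "thread B", run inline for demo */
		printf("slot 3 after close: %p\n",
		       lookup_if_sole_owner(&t, 3));
		return 0;
	}

Compiled with cc -std=c11, this prints a null slot pointer: once the
closing side's decrement is visible via the acquire load, so is its
clearing of the slot.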
Diffstat (limited to 'fs/file.c')
-rw-r--r--  fs/file.c  11  ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/file.c b/fs/file.c
index 5f9c802a5d8d..c942c89ca4cd 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1003,7 +1003,16 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
 	struct files_struct *files = current->files;
 	struct file *file;
 
-	if (atomic_read(&files->count) == 1) {
+	/*
+	 * If another thread is concurrently calling close_fd() followed
+	 * by put_files_struct(), we must not observe the old table
+	 * entry combined with the new refcount - otherwise we could
+	 * return a file that is concurrently being freed.
+	 *
+	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
+	 * put_files_struct().
+	 */
+	if (atomic_read_acquire(&files->count) == 1) {
 		file = files_lookup_fd_raw(files, fd);
 		if (!file || unlikely(file->f_mode & mask))
 			return 0;
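[Editor's note] The new comment names the other half of the pairing:
atomic_dec_and_test() in put_files_struct(). For context, a simplified,
non-authoritative sketch of the shape of that release side (the real
function in fs/file.c also tears down the FD table before freeing):

	/*
	 * Sketch only - not the actual fs/file.c source. The point is the
	 * pairing: atomic_dec_and_test() is a value-returning atomic RMW,
	 * which the kernel memory model treats as fully ordered, i.e.
	 * stronger than release. A reader that observes the decremented
	 * count via atomic_read_acquire() therefore also observes every
	 * store made before it, including close_fd()'s table update.
	 */
	void put_files_struct(struct files_struct *files)
	{
		if (atomic_dec_and_test(&files->count)) {
			/* last reference: free the FD table and the
			 * files_struct itself (elided) */
		}
	}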