author     Linus Torvalds <torvalds@linux-foundation.org>  2020-03-06 14:55:27 -0600
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-03-06 14:55:27 -0600
commit     0b25d458035d0ca6502e678874e2ccb2fa2ddc23 (patch)
tree       be031839fcdb58b4c9388864804635d23b8d2a4d /fs
parent     ae24a21bbd96a7a4a3e82890285255d7e86540f0 (diff)
parent     6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da (diff)
Merge tag 'filelock-v5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/jlayton/linux
Pull file locking fixes from Jeff Layton:
 "Just a couple of late-breaking patches for the file locking code. The
  second patch (from yangerkun) fixes a rather nasty looking potential
  use-after-free that should go to stable. The other patch could
  technically wait for 5.7, but it's fairly innocuous so I figured we
  might as well take it"

* tag 'filelock-v5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/jlayton/linux:
  locks: fix a potential use-after-free problem when wakeup a waiter
  fcntl: Distribute switch variables for initialization
Diffstat (limited to 'fs')
-rw-r--r--  fs/fcntl.c |  6 ++++--
-rw-r--r--  fs/locks.c | 14 --------------
2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 9bc167562ee8..2e4c0fa2074b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -735,8 +735,9 @@ static void send_sigio_to_task(struct task_struct *p,
                 return;
 
         switch (signum) {
-                kernel_siginfo_t si;
-                default:
+        default: {
+                kernel_siginfo_t si;
+
                         /* Queue a rt signal with the appropriate fd as its
                            value. We use SI_SIGIO as the source, not
                            SI_KERNEL, since kernel signals always get
@@ -769,6 +770,7 @@ static void send_sigio_to_task(struct task_struct *p,
                         si.si_fd = fd;
                         if (!do_send_sig_info(signum, &si, p, type))
                                 break;
+        }
                 /* fall-through - fall back on the old plain SIGIO signal */
                 case 0:
                         do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
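The fcntl.c hunk deals with a C scoping quirk: a variable declared between "switch (signum) {" and the first label is in scope for the whole switch body, but its initializer can never execute, because control always jumps straight to a case label past the declaration. Moving the declaration into a braced "default:" block makes the initialization reachable (and lets compiler-driven auto-initialization of locals actually take effect). The standalone sketch below illustrates the difference; it is not kernel code, and get_value(), before_fix() and after_fix() are invented for this example.

#include <stdio.h>

static int runs;        /* counts how many times the initializer executed */

static int get_value(void)
{
        runs++;
        return 42;
}

/* Mirrors the old send_sigio_to_task() layout: the declaration sits
 * between "switch (...) {" and the first label, so control jumps past
 * it and the initializer is silently skipped. */
static void before_fix(int signum)
{
        switch (signum) {
                int si = get_value();   /* dead code: the jump lands on a label below */
        default:
                (void)&si;              /* si is in scope but was never initialized;
                                           take its address only, never its value */
                break;
        case 0:
                break;
        }
}

/* Mirrors the patched layout: the variable is scoped inside the block
 * of the label that uses it, so its initializer is actually reached. */
static void after_fix(int signum)
{
        switch (signum) {
        default: {
                int si = get_value();
                (void)&si;
                break;
        }
        case 0:
                break;
        }
}

int main(void)
{
        before_fix(1);
        printf("before_fix: initializer ran %d time(s)\n", runs);      /* prints 0 */

        runs = 0;
        after_fix(1);
        printf("after_fix:  initializer ran %d time(s)\n", runs);      /* prints 1 */
        return 0;
}

Recent GCC releases also provide -Wswitch-unreachable, which flags the skipped declaration in before_fix().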
diff --git a/fs/locks.c b/fs/locks.c
index 44b6da032842..426b55d333d5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
 {
         int status = -ENOENT;
 
-        /*
-         * If fl_blocker is NULL, it won't be set again as this thread
-         * "owns" the lock and is the only one that might try to claim
-         * the lock. So it is safe to test fl_blocker locklessly.
-         * Also if fl_blocker is NULL, this waiter is not listed on
-         * fl_blocked_requests for some lock, so no other request can
-         * be added to the list of fl_blocked_requests for this
-         * request. So if fl_blocker is NULL, it is safe to
-         * locklessly check if fl_blocked_requests is empty. If both
-         * of these checks succeed, there is no need to take the lock.
-         */
-        if (waiter->fl_blocker == NULL &&
-            list_empty(&waiter->fl_blocked_requests))
-                return status;
         spin_lock(&blocked_lock_lock);
         if (waiter->fl_blocker)
                 status = 0;
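The locks.c change removes the lockless fast path so that locks_delete_block() always takes blocked_lock_lock before deciding the waiter is no longer blocked. The waker clears fl_blocker and wakes the waiter while holding that lock; with the lockless check in place, a waiter could observe fl_blocker == NULL, return, and be freed by its caller while the waker was still about to touch it. The sketch below is a minimal userland analogue of the corrected discipline, not kernel code: struct waiter, wake_up_waiter() and delete_block() are simplified stand-ins for the file_lock machinery, with a single pthread mutex playing the role of blocked_lock_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;

struct waiter {
        struct waiter *blocker;         /* non-NULL while queued behind a blocker */
        pthread_cond_t wake;            /* the waiter sleeps here */
        bool woken;
};

/* Waker side: detach and notify in one critical section, the way
 * __locks_wake_up_blocks() runs entirely under blocked_lock_lock. */
static void wake_up_waiter(struct waiter *w)
{
        pthread_mutex_lock(&blocked_lock);
        w->blocker = NULL;                      /* detach ... */
        w->woken = true;
        pthread_cond_signal(&w->wake);          /* ... and notify, still under the lock */
        pthread_mutex_unlock(&blocked_lock);
}

/* Waiter-side teardown: always take the lock before concluding the waiter
 * is detached.  A lockless "blocker == NULL, nothing to do" shortcut here
 * would let the caller free the waiter while the waker above was still
 * between its detach and its notify. */
static int delete_block(struct waiter *w)
{
        int status = -1;

        pthread_mutex_lock(&blocked_lock);
        if (w->blocker)
                status = 0;
        w->blocker = NULL;
        pthread_mutex_unlock(&blocked_lock);
        return status;
}

static void *waiter_thread(void *arg)
{
        struct waiter *w = arg;

        pthread_mutex_lock(&blocked_lock);
        while (!w->woken)
                pthread_cond_wait(&w->wake, &blocked_lock);
        pthread_mutex_unlock(&blocked_lock);

        delete_block(w);                /* serialized against wake_up_waiter() */
        pthread_cond_destroy(&w->wake);
        free(w);                        /* safe: the waker has finished with w */
        return NULL;
}

int main(void)
{
        struct waiter *w = calloc(1, sizeof(*w));
        pthread_t tid;

        pthread_cond_init(&w->wake, NULL);
        w->blocker = w;                 /* any non-NULL value marks it as queued */

        pthread_create(&tid, NULL, waiter_thread, w);
        wake_up_waiter(w);
        pthread_join(tid, NULL);

        puts("waiter detached, woken, and freed under blocked_lock");
        return 0;
}

The point of the sketch is that the waker's detach-and-notify and the waiter's teardown are serialized on the same lock, so the waiter cannot free itself in the window between the two; the removed lockless shortcut reintroduced exactly that window.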