author     John Ogness <john.ogness@linutronix.de>    2020-06-19 17:23:18 +0206
committer  Jens Axboe <axboe@kernel.dk>               2020-07-16 10:22:15 -0600
commit     ab96bbab467c884ad684c5f669c91272a0455087
tree       966c1f220ecc02124e43ec877d6808304c8f0e4e /block
parent     a43f085f8720c4f705b86e7432d19ef955750b36
block: remove retry loop in ioc_release_fn()
The reverse-order double lock dance in ioc_release_fn() is using a retry
loop. This is a problem on PREEMPT_RT because it could preempt the task
that would release q->queue_lock and thus live lock in the retry loop.

RCU is already managing the freeing of the request queue and icq. If the
trylock fails, use RCU to guarantee that the request queue and icq are
not freed and re-acquire the locks in the correct order, allowing forward
progress.

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
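To illustrate the locking pattern the patch switches to, here is a minimal
userspace sketch using POSIX mutexes. It is not the kernel code: the names
ioc_lock, q_lock, icq_destroyed, release_icq and destroy_icq are invented
for the example, and a plain flag plus caller-held reference stands in for
the ICQ_DESTROYED flag and rcu_read_lock() used in the real patch.

/*
 * Sketch of the "drop the out-of-order lock, then take both locks in
 * the canonical order and re-check" pattern.  Canonical order here is
 * q_lock first, then ioc_lock; release_icq() takes ioc_lock first, so
 * it may only trylock q_lock on the fast path.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t q_lock   = PTHREAD_MUTEX_INITIALIZER; /* ~ q->queue_lock */
static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ ioc->lock     */
static bool icq_destroyed;                                   /* ~ ICQ_DESTROYED */

static void destroy_icq(void)
{
	/* Called with both locks held, like ioc_destroy_icq(). */
	icq_destroyed = true;
}

static void release_icq(void)
{
	pthread_mutex_lock(&ioc_lock);          /* taken out of canonical order */

	if (pthread_mutex_trylock(&q_lock) == 0) {
		/* Fast path: got both locks, destroy immediately. */
		destroy_icq();
		pthread_mutex_unlock(&q_lock);
	} else {
		/*
		 * Slow path.  Instead of "unlock, cpu_relax(), retry"
		 * (which can live lock on PREEMPT_RT), drop ioc_lock and
		 * re-acquire both locks in the canonical order.  In the
		 * kernel patch, rcu_read_lock() keeps q and the icq from
		 * being freed across this unlocked window; here the
		 * objects are globals, so nothing can vanish.
		 */
		pthread_mutex_unlock(&ioc_lock);
		pthread_mutex_lock(&q_lock);
		pthread_mutex_lock(&ioc_lock);

		/* Someone else may have destroyed it while ioc_lock was dropped. */
		if (!icq_destroyed)
			destroy_icq();

		pthread_mutex_unlock(&q_lock);
	}

	pthread_mutex_unlock(&ioc_lock);
}

int main(void)
{
	release_icq();
	printf("destroyed: %d\n", icq_destroyed);
	return 0;
}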
Diffstat (limited to 'block')
-rw-r--r--   block/blk-ioc.c   20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 5dbcfa1b872e..57299f860d41 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -107,9 +107,23 @@ static void ioc_release_fn(struct work_struct *work)
 			ioc_destroy_icq(icq);
 			spin_unlock(&q->queue_lock);
 		} else {
-			spin_unlock_irq(&ioc->lock);
-			cpu_relax();
-			spin_lock_irq(&ioc->lock);
+			/* Make sure q and icq cannot be freed. */
+			rcu_read_lock();
+
+			/* Re-acquire the locks in the correct order. */
+			spin_unlock(&ioc->lock);
+			spin_lock(&q->queue_lock);
+			spin_lock(&ioc->lock);
+
+			/*
+			 * The icq may have been destroyed when the ioc lock
+			 * was released.
+			 */
+			if (!(icq->flags & ICQ_DESTROYED))
+				ioc_destroy_icq(icq);
+
+			spin_unlock(&q->queue_lock);
+			rcu_read_unlock();
 		}
 	}
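The re-check of ICQ_DESTROYED in the new slow path is what makes dropping
ioc->lock safe: once that lock is released, another context may already have
torn the icq down, and rcu_read_lock() only guarantees that the memory behind
q and the icq is not freed for the duration of the read-side critical section,
not that the icq is still live. Per the commit message, that guarantee is
enough to replace the unbounded retry loop with a single re-acquisition of the
locks in the correct order, restoring forward progress on PREEMPT_RT.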