Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  |  26  +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee0e5cafa859..1a95272cca50 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
* future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
- /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
/*
- * Dead queue is permanently in bypass mode till released. Note
+ * A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /* drain all requests queued before DEAD marking */
+ /* drain all requests queued before DYING marking */
blk_drain_queue(q, true);
/* @q won't process any more request, flush async actions */
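The two hunks above cover blk_cleanup_queue() itself: the flag is set under both q->sysfs_lock and the queue spinlock before blk_drain_queue() runs, so no new request can be allocated or merged once draining starts. A minimal userspace model of that mark-then-drain ordering, with pthreads standing in for the kernel locks (struct rqueue and every name below are illustrative stand-ins, not kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct rqueue {
        pthread_mutex_t lock;      /* stands in for q->queue_lock */
        pthread_cond_t  drained;
        int             in_flight; /* submitted but not yet completed */
        bool            dying;     /* stands in for QUEUE_FLAG_DYING */
    };

    /* Fail new work once the queue is dying, as __get_request() does. */
    int rqueue_submit(struct rqueue *q)
    {
        pthread_mutex_lock(&q->lock);
        if (q->dying) {
            pthread_mutex_unlock(&q->lock);
            return -ENODEV;
        }
        q->in_flight++;
        pthread_mutex_unlock(&q->lock);
        return 0;
    }

    void rqueue_complete(struct rqueue *q)
    {
        pthread_mutex_lock(&q->lock);
        if (--q->in_flight == 0)
            pthread_cond_broadcast(&q->drained);
        pthread_mutex_unlock(&q->lock);
    }

    /* Mirrors blk_cleanup_queue(): mark dying first, then drain. */
    void rqueue_cleanup(struct rqueue *q)
    {
        pthread_mutex_lock(&q->lock);
        q->dying = true;            /* no new submissions after this point */
        while (q->in_flight > 0)    /* drain requests queued before DYING */
            pthread_cond_wait(&q->drained, &q->lock);
        pthread_mutex_unlock(&q->lock);
        /* only now is it safe to release the queue's resources */
    }

The essential property is the ordering: the dying flag becomes visible before the drain begins, so the drain only has to wait out requests that were already in flight.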
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
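blk_get_queue() is a conditional reference grab: once the queue starts dying, no new long-lived references are handed out. The same try-get shape in portable C with C11 atomics (an illustrative sketch, not the kernel's kobject refcounting):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct queue {
        atomic_int  refcount;
        atomic_bool dying;
    };

    /* Succeeds only while the queue is not dying, like blk_get_queue(). */
    bool queue_tryget(struct queue *q)
    {
        if (!atomic_load(&q->dying)) {
            atomic_fetch_add(&q->refcount, 1);
            return true;
        }
        return false;
    }

As in the kernel, the flag test and the increment are not one atomic step; a reference taken just as the flag is set is tolerated, because the queue is only freed when the last reference is dropped.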
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ retry:
if (rq)
return rq;
- if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
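__get_request() fails fast on a dying queue, and the retry loop in get_request() (the retry: hunk above) gives up rather than sleep when the caller cannot wait or the queue is dying; sleeping would be pointless, since a dying queue will never make room. A self-contained sketch of that allocate-or-wait shape, using a toy bounded pool (not kernel code; the kernel paths simply return NULL in both failure cases):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct pool {
        pthread_mutex_t lock;
        pthread_cond_t  freed;
        int             free_slots;
        bool            dying;
    };

    int pool_get(struct pool *p, bool may_wait)
    {
        pthread_mutex_lock(&p->lock);
        for (;;) {
            if (p->dying) {          /* blk_queue_dying(): fail, never wait */
                pthread_mutex_unlock(&p->lock);
                return -ENODEV;
            }
            if (p->free_slots > 0) {
                p->free_slots--;
                pthread_mutex_unlock(&p->lock);
                return 0;
            }
            if (!may_wait) {         /* !(gfp_mask & __GFP_WAIT) analogue */
                pthread_mutex_unlock(&p->lock);
                return -EAGAIN;
            }
            /* sleep until a slot frees, then re-check both conditions */
            pthread_cond_wait(&p->freed, &p->lock);
        }
    }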
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
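Unlike the opportunistic check in blk_get_queue(), blk_insert_cloned_request() tests the flag under q->queue_lock, the same lock blk_cleanup_queue() holds while setting it, so the answer cannot go stale between the check and the insertion. The check-under-lock shape in plain C (illustrative names, a pthread mutex standing in for the irq-safe spinlock):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct queue {
        pthread_mutex_t lock;   /* stands in for q->queue_lock */
        bool            dying;
        int             queued;
    };

    /* The dying check and the insertion sit in one critical section,
     * mirroring blk_insert_cloned_request(). */
    int queue_insert(struct queue *q)
    {
        pthread_mutex_lock(&q->lock);
        if (q->dying) {
            pthread_mutex_unlock(&q->lock);
            return -ENODEV;     /* same error the kernel path returns */
        }
        q->queued++;            /* insertion proper would happen here */
        pthread_mutex_unlock(&q->lock);
        return 0;
    }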
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
trace_block_unplug(q, depth, !from_schedule);
/*
- * Don't mess with dead queue.
+ * Don't mess with a dying queue.
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock(q->queue_lock);
return;
}
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
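Both plug-flush paths, queue_unplugged() and blk_flush_plug_list(), follow the same rule: work already sitting in the plug but aimed at a dying queue is completed immediately with -ENODEV instead of being dispatched. A sketch of that short-circuit loop (types and names are illustrative, not kernel API):

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct req {
        struct req *next;
        int         error;   /* completion status */
        bool        done;
    };

    /* Mirrors the blk_flush_plug_list() loop: requests bound for a dying
     * queue are ended with -ENODEV, standing in for
     * __blk_end_request_all(rq, -ENODEV); the rest would be dispatched. */
    void flush_plugged(struct req *list, bool queue_dying)
    {
        for (struct req *rq = list; rq; rq = rq->next) {
            if (queue_dying) {
                rq->error = -ENODEV;
                rq->done  = true;
                continue;
            }
            /* normal path: hand rq to the driver for dispatch */
        }
    }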