Diffstat (limited to 'block/blk-mq-tag.c')
 block/blk-mq-tag.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 426197312069..cc57e2dd9a0b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -40,16 +40,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 	unsigned int users;
 	struct blk_mq_tags *tags = hctx->tags;
 
+	/*
+	 * calling test_bit() prior to test_and_set_bit() is intentional,
+	 * it avoids dirtying the cacheline if the queue is already active.
+	 */
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
 
-		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return;
-		set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
 	} else {
-		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
-		set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
 	}
 
 	spin_lock_irq(&tags->lock);
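
The comment added by this hunk explains the idiom: a plain test_bit() read before the test_and_set_bit() read-modify-write means callers that find the flag already set never pull the cacheline into exclusive state. A minimal userspace analogue of that pattern, sketched with C11 atomics rather than the kernel's bitops (the hctx_active flag and mark_active() helper are illustrative names, not part of blk-mq-tag.c):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag, standing in for QUEUE_FLAG_HCTX_ACTIVE / BLK_MQ_S_TAG_ACTIVE. */
static atomic_bool hctx_active;

/*
 * "Plain read before atomic RMW": the relaxed load only needs the cacheline
 * in shared state, so callers that find the flag already set do not force it
 * exclusive the way an unconditional atomic exchange would.
 */
static bool mark_active(void)
{
	if (atomic_load_explicit(&hctx_active, memory_order_relaxed))
		return false;		/* already active, read-only fast path */

	/* At most one caller wins the exchange and does the activation work. */
	return !atomic_exchange(&hctx_active, true);
}

int main(void)
{
	printf("first call activates:  %d\n", mark_active());	/* prints 1 */
	printf("second call is a noop: %d\n", mark_active());	/* prints 0 */
	return 0;
}

Under contention, later callers stay read-only on the line; only the first caller (or callers racing before the flag becomes visible) pays for the atomic exchange, which mirrors what the test_bit() || test_and_set_bit() change buys in the kernel path.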