path: root/block/blk-mq.c
author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 11:35:36 -0800
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 11:35:36 -0800
commit    4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree      a20c125b27740ec7b4c761b11d801108e1b316b2 /block/blk-mq.c
parent    47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent    98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 517
1 file changed, 292 insertions(+), 225 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index df8e1e09dd17..da1ab5641227 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -20,6 +20,7 @@
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
+#include <linux/crash_dump.h>
#include <trace/events/block.h>
@@ -106,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -119,21 +116,26 @@ void blk_mq_freeze_queue(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
if (freeze) {
- /*
- * XXX: Temporary kludge to work around SCSI blk-mq stall.
- * SCSI synchronously creates and destroys many queues
- * back-to-back during probe leading to lengthy stalls.
- * This will be fixed by keeping ->mq_usage_counter in
- * atomic mode until genhd registration, but, for now,
- * let's work around using expedited synchronization.
- */
- __percpu_ref_kill_expedited(&q->mq_usage_counter);
-
+ percpu_ref_kill(&q->mq_usage_counter);
blk_mq_run_queues(q, false);
}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+ blk_mq_freeze_queue_start(q);
+ blk_mq_freeze_queue_wait(q);
+}
+
static void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
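The hunk above splits blk_mq_freeze_queue() into a start phase (kill the percpu ref and kick the queues) and a wait phase. A minimal sketch of why the split matters, assuming a caller that holds several queues (the in-tree user is the CPU-hotplug notifier later in this diff); the helper names mirror the static functions added above, this is illustrative only and not part of the patch:

static void freeze_all_queues(struct list_head *queues)
{
	struct request_queue *q;

	/* Kick off every freeze first so the RCU grace periods overlap... */
	list_for_each_entry(q, queues, all_q_node)
		blk_mq_freeze_queue_start(q);

	/* ...then pay a single combined wait instead of one per queue. */
	list_for_each_entry(q, queues, all_q_node)
		blk_mq_freeze_queue_wait(q);
}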
@@ -232,9 +234,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_hw_ctx *hctx;
struct request *rq;
struct blk_mq_alloc_data alloc_data;
+ int ret;
- if (blk_mq_queue_enter(q))
- return NULL;
+ ret = blk_mq_queue_enter(q);
+ if (ret)
+ return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -254,6 +258,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
+ if (!rq)
+ return ERR_PTR(-EWOULDBLOCK);
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
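With the change above, blk_mq_alloc_request() reports failure as an ERR_PTR() value (for example -EWOULDBLOCK when no tag is available) instead of NULL. A sketch of the caller side under the new convention; the mydrv_* name is hypothetical:

static int mydrv_issue_internal(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EWOULDBLOCK when no tag is free */

	/* ... fill in the command payload and issue it ... */

	blk_mq_free_request(rq);
	return 0;
}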
@@ -273,39 +279,27 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
blk_mq_queue_exit(q);
}
-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
ctx->rq_completed[rq_is_sync(rq)]++;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
__blk_mq_free_request(hctx, ctx, rq);
+
}
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
-/*
- * Clone all relevant state from a request that has been put on hold in
- * the flush state machine into the preallocated flush request that hangs
- * off the request queue.
- *
- * For a driver the flush request should be invisible, that's why we are
- * impersonating the original request here.
- */
-void blk_mq_clone_flush_request(struct request *flush_rq,
- struct request *orig_rq)
+void blk_mq_free_request(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx =
- orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
- flush_rq->mq_ctx = orig_rq->mq_ctx;
- flush_rq->tag = orig_rq->tag;
- memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
- hctx->cmd_size);
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ blk_mq_free_hctx_request(hctx, rq);
}
+EXPORT_SYMBOL_GPL(blk_mq_free_request);
-inline void __blk_mq_end_io(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, int error)
{
blk_account_io_done(rq);
@@ -317,15 +311,15 @@ inline void __blk_mq_end_io(struct request *rq, int error)
blk_mq_free_request(rq);
}
}
-EXPORT_SYMBOL(__blk_mq_end_io);
+EXPORT_SYMBOL(__blk_mq_end_request);
-void blk_mq_end_io(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, int error)
{
if (blk_update_request(rq, error, blk_rq_bytes(rq)))
BUG();
- __blk_mq_end_io(rq, error);
+ __blk_mq_end_request(rq, error);
}
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_request);
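The *_io() completion helpers are renamed to *_request() here. A sketch of how a driver completion path uses them (driver names hypothetical): the hard-IRQ side hands the request to blk_mq_complete_request(), and the ->complete() callback wired up via blk_queue_softirq_done() later in this file finishes it with blk_mq_end_request():

/* Hypothetical driver: called from the hardware interrupt handler. */
static void mydrv_hw_irq_complete(struct request *rq)
{
	blk_mq_complete_request(rq);	/* may bounce to the submitting CPU */
}

/* Hypothetical driver: the ->complete() callback, run in softirq context. */
static void mydrv_softirq_done(struct request *rq)
{
	blk_mq_end_request(rq, rq->errors ? -EIO : 0);
}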
static void __blk_mq_complete_request_remote(void *data)
{
@@ -365,7 +359,7 @@ void __blk_mq_complete_request(struct request *rq)
struct request_queue *q = rq->q;
if (!q->softirq_done_fn)
- blk_mq_end_io(rq, rq->errors);
+ blk_mq_end_request(rq, rq->errors);
else
blk_mq_ipi_complete_request(rq);
}
@@ -389,7 +383,7 @@ void blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
-static void blk_mq_start_request(struct request *rq, bool last)
+void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -426,35 +420,24 @@ static void blk_mq_start_request(struct request *rq, bool last)
*/
rq->nr_phys_segments++;
}
-
- /*
- * Flag the last request in the series so that drivers know when IO
- * should be kicked off, if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers should do kick off.
- * If drive is busy, the last request might not have the bit set.
- */
- if (last)
- rq->cmd_flags |= REQ_END;
}
+EXPORT_SYMBOL(blk_mq_start_request);
static void __blk_mq_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
- clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-
- rq->cmd_flags &= ~REQ_END;
- if (q->dma_drain_size && blk_rq_bytes(rq))
- rq->nr_phys_segments--;
+ if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
+ }
}
void blk_mq_requeue_request(struct request *rq)
{
__blk_mq_requeue_request(rq);
- blk_clear_rq_complete(rq);
BUG_ON(blk_queued_rq(rq));
blk_mq_add_to_requeue_list(rq, true);
@@ -523,78 +506,35 @@ void blk_mq_kick_requeue_list(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
-static inline bool is_flush_request(struct request *rq, unsigned int tag)
+static inline bool is_flush_request(struct request *rq,
+ struct blk_flush_queue *fq, unsigned int tag)
{
return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
- rq->q->flush_rq->tag == tag);
+ fq->flush_rq->tag == tag);
}
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
struct request *rq = tags->rqs[tag];
+ /* mq_ctx of flush rq is always cloned from the corresponding req */
+ struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
- if (!is_flush_request(rq, tag))
+ if (!is_flush_request(rq, fq, tag))
return rq;
- return rq->q->flush_rq;
+ return fq->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
struct blk_mq_timeout_data {
- struct blk_mq_hw_ctx *hctx;
- unsigned long *next;
- unsigned int *next_set;
+ unsigned long next;
+ unsigned int next_set;
};
-static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
+void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
- struct blk_mq_timeout_data *data = __data;
- struct blk_mq_hw_ctx *hctx = data->hctx;
- unsigned int tag;
-
- /* It may not be in flight yet (this is where
- * the REQ_ATOMIC_STARTED flag comes in). The requests are
- * statically allocated, so we know it's always safe to access the
- * memory associated with a bit offset into ->rqs[].
- */
- tag = 0;
- do {
- struct request *rq;
-
- tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
- if (tag >= hctx->tags->nr_tags)
- break;
-
- rq = blk_mq_tag_to_rq(hctx->tags, tag++);
- if (rq->q != hctx->queue)
- continue;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
- continue;
-
- blk_rq_check_expired(rq, data->next, data->next_set);
- } while (1);
-}
-
-static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
- unsigned long *next,
- unsigned int *next_set)
-{
- struct blk_mq_timeout_data data = {
- .hctx = hctx,
- .next = next,
- .next_set = next_set,
- };
-
- /*
- * Ask the tagging code to iterate busy requests, so we can
- * check them for timeout.
- */
- blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
-}
-
-static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
-{
- struct request_queue *q = rq->q;
+ struct blk_mq_ops *ops = req->q->mq_ops;
+ enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
/*
* We know that complete is set at this point. If STARTED isn't set
@@ -605,36 +545,69 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
* we both flags will get cleared. So check here again, and ignore
* a timeout event with a request that isn't active.
*/
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
- return BLK_EH_NOT_HANDLED;
+ if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
+ return;
- if (!q->mq_ops->timeout)
- return BLK_EH_RESET_TIMER;
+ if (ops->timeout)
+ ret = ops->timeout(req, reserved);
+
+ switch (ret) {
+ case BLK_EH_HANDLED:
+ __blk_mq_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+ blk_add_timer(req);
+ blk_clear_rq_complete(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ break;
+ default:
+ printk(KERN_ERR "block: bad eh return: %d\n", ret);
+ break;
+ }
+}
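blk_mq_rq_timed_out() above now interprets the BLK_EH_* return value centrally, so a driver's ->timeout handler only decides what to do with the stuck command. A minimal sketch; the mydrv_* helpers and pdu type are hypothetical:

static enum blk_eh_timer_return mydrv_timeout(struct request *rq, bool reserved)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* hypothetical pdu */

	if (mydrv_try_abort(cmd)) {		/* hypothetical abort helper */
		rq->errors = -ETIMEDOUT;
		return BLK_EH_HANDLED;	/* core calls __blk_mq_complete_request() */
	}

	return BLK_EH_RESET_TIMER;	/* core re-arms the timer and clears COMPLETE */
}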
+
+static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, void *priv, bool reserved)
+{
+ struct blk_mq_timeout_data *data = priv;
- return q->mq_ops->timeout(rq);
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ return;
+
+ if (time_after_eq(jiffies, rq->deadline)) {
+ if (!blk_mark_rq_complete(rq))
+ blk_mq_rq_timed_out(rq, reserved);
+ } else if (!data->next_set || time_after(data->next, rq->deadline)) {
+ data->next = rq->deadline;
+ data->next_set = 1;
+ }
}
-static void blk_mq_rq_timer(unsigned long data)
+static void blk_mq_rq_timer(unsigned long priv)
{
- struct request_queue *q = (struct request_queue *) data;
+ struct request_queue *q = (struct request_queue *)priv;
+ struct blk_mq_timeout_data data = {
+ .next = 0,
+ .next_set = 0,
+ };
struct blk_mq_hw_ctx *hctx;
- unsigned long next = 0;
- int i, next_set = 0;
+ int i;
queue_for_each_hw_ctx(q, hctx, i) {
/*
* If not software queues are currently mapped to this
* hardware queue, there's nothing to check
*/
- if (!hctx->nr_ctx || !hctx->tags)
+ if (!blk_mq_hw_queue_mapped(hctx))
continue;
- blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+ blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
}
- if (next_set) {
- next = blk_rq_timeout(round_jiffies_up(next));
- mod_timer(&q->timeout, next);
+ if (data.next_set) {
+ data.next = blk_rq_timeout(round_jiffies_up(data.next));
+ mod_timer(&q->timeout, data.next);
} else {
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_tag_idle(hctx);
@@ -725,6 +698,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct request *rq;
LIST_HEAD(rq_list);
+ LIST_HEAD(driver_list);
+ struct list_head *dptr;
int queued;
WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
@@ -751,18 +726,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
}
/*
+ * Start off with dptr being NULL, so we start the first request
+ * immediately, even if we have more pending.
+ */
+ dptr = NULL;
+
+ /*
* Now process all the entries, sending them to the driver.
*/
queued = 0;
while (!list_empty(&rq_list)) {
+ struct blk_mq_queue_data bd;
int ret;
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq, list_empty(&rq_list));
+ bd.rq = rq;
+ bd.list = dptr;
+ bd.last = list_empty(&rq_list);
- ret = q->mq_ops->queue_rq(hctx, rq);
+ ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
case BLK_MQ_RQ_QUEUE_OK:
queued++;
@@ -775,12 +759,19 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
pr_err("blk-mq: bad return on queue: %d\n", ret);
case BLK_MQ_RQ_QUEUE_ERROR:
rq->errors = -EIO;
- blk_mq_end_io(rq, rq->errors);
+ blk_mq_end_request(rq, rq->errors);
break;
}
if (ret == BLK_MQ_RQ_QUEUE_BUSY)
break;
+
+ /*
+ * We've done the first request. If we have more than 1
+ * left in the list, set dptr to defer issue.
+ */
+ if (!dptr && rq_list.next != rq_list.prev)
+ dptr = &driver_list;
}
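The dispatch loop now passes a struct blk_mq_queue_data instead of a bare request, and no longer calls blk_mq_start_request() on the driver's behalf. A sketch of the driver side under the new convention (mydrv_* names are hypothetical; drivers that do not set BLK_MQ_F_DEFER_ISSUE can ignore bd->list):

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* drivers start the request themselves now */

	if (!mydrv_submit(hctx->driver_data, rq))	/* hypothetical submit helper */
		return BLK_MQ_RQ_QUEUE_BUSY;	/* core re-queues and retries later */

	if (bd->last)
		mydrv_ring_doorbell(hctx->driver_data);	/* kick hardware on the last request */

	return BLK_MQ_RQ_QUEUE_OK;
}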
if (!queued)
@@ -807,10 +798,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
- int cpu = hctx->next_cpu;
+ if (hctx->queue->nr_hw_queues == 1)
+ return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
- int next_cpu;
+ int cpu = hctx->next_cpu, next_cpu;
next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -818,26 +810,32 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+
+ return cpu;
}
- return cpu;
+ return hctx->next_cpu;
}
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
+ !blk_mq_hw_queue_mapped(hctx)))
return;
- if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
- __blk_mq_run_hw_queue(hctx);
- else if (hctx->queue->nr_hw_queues == 1)
- kblockd_schedule_delayed_work(&hctx->run_work, 0);
- else {
- unsigned int cpu;
+ if (!async) {
+ int cpu = get_cpu();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+ put_cpu();
+ return;
+ }
- cpu = blk_mq_hctx_next_cpu(hctx);
- kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
+ put_cpu();
}
+
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->run_work, 0);
}
void blk_mq_run_queues(struct request_queue *q, bool async)
@@ -851,9 +849,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_run_queues);
@@ -880,9 +876,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, false);
- preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -907,9 +901,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
continue;
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@@ -935,16 +927,11 @@ static void blk_mq_delay_work_fn(struct work_struct *work)
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
- unsigned long tmo = msecs_to_jiffies(msecs);
-
- if (hctx->queue->nr_hw_queues == 1)
- kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
- else {
- unsigned int cpu;
+ if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
+ return;
- cpu = blk_mq_hctx_next_cpu(hctx);
- kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
- }
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);
@@ -1199,18 +1186,27 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
goto run_queue;
}
- if (is_sync) {
+ /*
+ * If the driver supports defer issued based on 'last', then
+ * queue it up like normal since we can potentially save some
+ * CPU this way.
+ */
+ if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+ struct blk_mq_queue_data bd = {
+ .rq = rq,
+ .list = NULL,
+ .last = 1
+ };
int ret;
blk_mq_bio_to_request(rq, bio);
- blk_mq_start_request(rq, true);
/*
* For OK queue, we are done. For error, kill it. Any other
* error (busy), just add it to our list as we previously
* would have done
*/
- ret = q->mq_ops->queue_rq(data.hctx, rq);
+ ret = q->mq_ops->queue_rq(data.hctx, &bd);
if (ret == BLK_MQ_RQ_QUEUE_OK)
goto done;
else {
@@ -1218,7 +1214,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
rq->errors = -EIO;
- blk_mq_end_io(rq, rq->errors);
+ blk_mq_end_request(rq, rq->errors);
goto done;
}
}
@@ -1540,6 +1536,28 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
return NOTIFY_OK;
}
+static void blk_mq_exit_hctx(struct request_queue *q,
+ struct blk_mq_tag_set *set,
+ struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+ unsigned flush_start_tag = set->queue_depth;
+
+ blk_mq_tag_idle(hctx);
+
+ if (set->ops->exit_request)
+ set->ops->exit_request(set->driver_data,
+ hctx->fq->flush_rq, hctx_idx,
+ flush_start_tag + hctx_idx);
+
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_free_flush_queue(hctx->fq);
+ kfree(hctx->ctxs);
+ blk_mq_free_bitmap(&hctx->ctx_map);
+}
+
static void blk_mq_exit_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set, int nr_queue)
{
@@ -1549,17 +1567,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
break;
-
- blk_mq_tag_idle(hctx);
-
- if (set->ops->exit_hctx)
- set->ops->exit_hctx(hctx, i);
-
- blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
- kfree(hctx->ctxs);
- blk_mq_free_bitmap(&hctx->ctx_map);
+ blk_mq_exit_hctx(q, set, hctx, i);
}
-
}
static void blk_mq_free_hw_queues(struct request_queue *q,
@@ -1574,53 +1583,88 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
}
}
-static int blk_mq_init_hw_queues(struct request_queue *q,
- struct blk_mq_tag_set *set)
+static int blk_mq_init_hctx(struct request_queue *q,
+ struct blk_mq_tag_set *set,
+ struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
+ int node;
+ unsigned flush_start_tag = set->queue_depth;
+
+ node = hctx->numa_node;
+ if (node == NUMA_NO_NODE)
+ node = hctx->numa_node = set->numa_node;
+
+ INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
+ spin_lock_init(&hctx->lock);
+ INIT_LIST_HEAD(&hctx->dispatch);
+ hctx->queue = q;
+ hctx->queue_num = hctx_idx;
+ hctx->flags = set->flags;
+ hctx->cmd_size = set->cmd_size;
+
+ blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
+ blk_mq_hctx_notify, hctx);
+ blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+
+ hctx->tags = set->tags[hctx_idx];
/*
- * Initialize hardware queues
+ * Allocate space for all possible cpus to avoid allocation at
+ * runtime
*/
- queue_for_each_hw_ctx(q, hctx, i) {
- int node;
+ hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+ GFP_KERNEL, node);
+ if (!hctx->ctxs)
+ goto unregister_cpu_notifier;
- node = hctx->numa_node;
- if (node == NUMA_NO_NODE)
- node = hctx->numa_node = set->numa_node;
+ if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+ goto free_ctxs;
- INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
- INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
- spin_lock_init(&hctx->lock);
- INIT_LIST_HEAD(&hctx->dispatch);
- hctx->queue = q;
- hctx->queue_num = i;
- hctx->flags = set->flags;
- hctx->cmd_size = set->cmd_size;
+ hctx->nr_ctx = 0;
- blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
- blk_mq_hctx_notify, hctx);
- blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
+ if (set->ops->init_hctx &&
+ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+ goto free_bitmap;
- hctx->tags = set->tags[i];
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+ if (!hctx->fq)
+ goto exit_hctx;
- /*
- * Allocate space for all possible cpus to avoid allocation at
- * runtime
- */
- hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
- GFP_KERNEL, node);
- if (!hctx->ctxs)
- break;
+ if (set->ops->init_request &&
+ set->ops->init_request(set->driver_data,
+ hctx->fq->flush_rq, hctx_idx,
+ flush_start_tag + hctx_idx, node))
+ goto free_fq;
- if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
- break;
+ return 0;
- hctx->nr_ctx = 0;
+ free_fq:
+ kfree(hctx->fq);
+ exit_hctx:
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+ free_bitmap:
+ blk_mq_free_bitmap(&hctx->ctx_map);
+ free_ctxs:
+ kfree(hctx->ctxs);
+ unregister_cpu_notifier:
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
- if (set->ops->init_hctx &&
- set->ops->init_hctx(hctx, set->driver_data, i))
+ return -1;
+}
+
+static int blk_mq_init_hw_queues(struct request_queue *q,
+ struct blk_mq_tag_set *set)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ /*
+ * Initialize hardware queues
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (blk_mq_init_hctx(q, set, hctx, i))
break;
}
@@ -1792,7 +1836,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (!hctxs[i])
goto err_hctxs;
- if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
+ node))
goto err_hctxs;
atomic_set(&hctxs[i]->nr_active, 0);
@@ -1804,7 +1849,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (!q)
goto err_hctxs;
- if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
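percpu_ref_init() gained flag and GFP arguments in this cycle; starting the counter in atomic mode keeps teardown cheap for queues that die before they ever reach blk_register_queue(), where the ref is switched to percpu mode. A stand-alone sketch of the pattern, assuming the 3.18+ percpu_ref API:

static int example_counter_init(struct percpu_ref *ref,
				percpu_ref_func_t *release)
{
	int ret;

	/* Atomic mode: a later percpu_ref_kill() needs no RCU grace period. */
	ret = percpu_ref_init(ref, release, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
	if (ret)
		return ret;

	/* Once the object is long-lived, switch to the percpu fast path. */
	percpu_ref_switch_to_percpu(ref);
	return 0;
}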
@@ -1834,7 +1884,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
else
blk_queue_make_request(q, blk_sq_make_request);
- blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
if (set->timeout)
blk_queue_rq_timeout(q, set->timeout);
@@ -1846,17 +1895,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (set->ops->complete)
blk_queue_softirq_done(q, set->ops->complete);
- blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
- q->flush_rq = kzalloc(round_up(sizeof(struct request) +
- set->cmd_size, cache_line_size()),
- GFP_KERNEL);
- if (!q->flush_rq)
- goto err_hw;
-
if (blk_mq_init_hw_queues(q, set))
- goto err_flush_rq;
+ goto err_hw;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@@ -1868,8 +1910,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
-err_flush_rq:
- kfree(q->flush_rq);
err_hw:
blk_cleanup_queue(q);
err_hctxs:
@@ -1915,7 +1955,7 @@ void blk_mq_free_queue(struct request_queue *q)
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
- blk_mq_freeze_queue(q);
+ WARN_ON_ONCE(!q->mq_freeze_depth);
blk_mq_sysfs_unregister(q);
@@ -1930,8 +1970,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
blk_mq_map_swqueue(q);
blk_mq_sysfs_register(q);
-
- blk_mq_unfreeze_queue(q);
}
static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1950,8 +1988,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
mutex_lock(&all_q_mutex);
+
+ /*
+ * We need to freeze and reinit all existing queues. Freezing
+ * involves synchronous wait for an RCU grace period and doing it
+ * one by one may take a long time. Start freezing all queues in
+ * one swoop and then wait for the completions so that freezing can
+ * take place in parallel.
+ */
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_start(q);
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_freeze_queue_wait(q);
+
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q);
+
+ list_for_each_entry(q, &all_q_list, all_q_node)
+ blk_mq_unfreeze_queue(q);
+
mutex_unlock(&all_q_mutex);
return NOTIFY_OK;
}
@@ -2018,6 +2073,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
+
if (!set->nr_hw_queues)
return -EINVAL;
if (!set->queue_depth)
@@ -2034,6 +2091,16 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
set->queue_depth = BLK_MQ_MAX_DEPTH;
}
+ /*
+ * If a crashdump is active, then we are potentially in a very
+ * memory constrained environment. Limit us to 1 queue and
+ * 64 tags to prevent using too much memory.
+ */
+ if (is_kdump_kernel()) {
+ set->nr_hw_queues = 1;
+ set->queue_depth = min(64U, set->queue_depth);
+ }
+
set->tags = kmalloc_node(set->nr_hw_queues *
sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
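For reference, a sketch of what a driver feeds into blk_mq_alloc_tag_set() (all mydrv_* names hypothetical). With the kdump clamp above, nr_hw_queues and queue_depth are upper bounds rather than guarantees; the adjusted values are written back into the set, so drivers should consult the set after the call succeeds:

/* Hypothetical driver types; a real driver fills mydrv_mq_ops with its callbacks. */
struct mydrv_cmd { int dummy; };			/* per-request pdu */
struct mydrv_ctrl { struct blk_mq_tag_set tag_set; };
static struct blk_mq_ops mydrv_mq_ops;

static int mydrv_setup_tag_set(struct mydrv_ctrl *ctrl)
{
	struct blk_mq_tag_set *set = &ctrl->tag_set;

	memset(set, 0, sizeof(*set));
	set->ops		= &mydrv_mq_ops;
	set->nr_hw_queues	= 4;		/* clamped to 1 when is_kdump_kernel() */
	set->queue_depth	= 128;		/* clamped to 64 when is_kdump_kernel() */
	set->numa_node		= NUMA_NO_NODE;
	set->cmd_size		= sizeof(struct mydrv_cmd);
	set->flags		= BLK_MQ_F_SHOULD_MERGE;
	set->driver_data	= ctrl;

	return blk_mq_alloc_tag_set(set);
}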