author    Linus Torvalds <torvalds@linux-foundation.org>  2019-06-08 12:12:11 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-06-08 12:12:11 -0700
commit    8d72e5bd86cb405d8d8b9e92905d8cfffd08dde8 (patch)
tree      65d5e73b08d7a573784dca677e374fd6fc25a694 /block/blk-mq-sched.c
parent    1b02caa319cf73ae89aced8714066a3a5bbe648b (diff)
parent    6c70f899b8089ae23cdb4aa63050e3df4e20c71e (diff)
Merge tag 'for-linus-20190608' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Allow symlink from the bfq.weight cgroup parameter to the general
   weight (Angelo)

 - Damien is new skd maintainer (Bart)

 - NVMe pull request from Sagi, with a few small fixes.

 - Ensure we set DMA segment size properly, dma-debug is now tripping
   on these (Christoph)

 - Remove useless debugfs_create() return check (Greg)

 - Remove redundant unlikely() check on IS_ERR() (Kefeng)

 - Fixup request freeing on exit (Ming)

* tag 'for-linus-20190608' of git://git.kernel.dk/linux-block:
  block, bfq: add weight symlink to the bfq.weight cgroup parameter
  cgroup: let a symlink too be created with a cftype file
  block: free sched's request pool in blk_cleanup_queue
  nvme-rdma: use dynamic dma mapping per command
  nvme: Fix u32 overflow in the number of namespace list calculation
  mmc: also set max_segment_size in the device
  mtip32xx: also set max_segment_size in the device
  rsxx: don't call dma_set_max_seg_size
  nvme-pci: don't limit DMA segement size
  block: Drop unlikely before IS_ERR(_OR_NULL)
  block: aoe: no need to check return value of debugfs_create functions
  nvmet: fix data_len to 0 for bdev-backed write_zeroes
  MAINTAINERS: Hand over skd maintainership
  nvme-tcp: fix queue mapping when queue count is limited
  nvme-rdma: fix queue mapping when queue count is limited
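The "free sched's request pool in blk_cleanup_queue" change in the diff below turns on an ordering constraint: scheduler requests hold resources tied to the driver's tag set, so they must be freed while the tag set still exists (from blk_cleanup_queue or elevator_switch), while the queue's release handler, which runs after the tag set is gone, may only tear down the rq maps. The following is a minimal user-space C sketch of that ordering constraint, not kernel code; the types and functions (tag_set, sched_requests, free_sched_requests, teardown_sched_map) are illustrative stand-ins, not block-layer API.

/*
 * Toy illustration: requests borrow a reference to the tag set, so the
 * step that touches the tag set must run before the tag set is freed.
 * The step that runs last (the "release handler") must not touch it.
 */
#include <stdio.h>
#include <stdlib.h>

struct tag_set {                /* stand-in for the driver's tag set */
	int nr_tags;
};

struct sched_requests {         /* stand-in for a hctx's sched_tags */
	struct tag_set *set;    /* borrowed reference, not owned */
	int nr;
};

/* Needs the tag set: must run before the tag set is freed. */
static void free_sched_requests(struct sched_requests *rqs)
{
	printf("freeing %d of %d requests\n", rqs->nr, rqs->set->nr_tags);
	rqs->nr = 0;
}

/* Does not touch the tag set: safe from the late release path. */
static void teardown_sched_map(struct sched_requests *rqs)
{
	rqs->set = NULL;
}

int main(void)
{
	struct tag_set *set = malloc(sizeof(*set));
	struct sched_requests rqs = { .set = set, .nr = 4 };

	set->nr_tags = 8;

	free_sched_requests(&rqs);  /* "blk_cleanup_queue": set still valid */
	free(set);                  /* driver tears down its tag set */
	teardown_sched_map(&rqs);   /* "release handler": no tag set access */
	return 0;
}

Swapping the first and last calls would dereference freed memory, which is the class of problem the split between blk_mq_sched_free_requests() and blk_mq_sched_tags_teardown() in the diff avoids.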
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c  30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return ret;
}
+/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags) {
+ blk_mq_free_rq_map(hctx->sched_tags);
+ hctx->sched_tags = NULL;
+ }
+ }
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
+ blk_mq_sched_free_requests(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
err:
+ blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
+/*
+ * called in either blk_queue_cleanup or elevator_switch, tagset
+ * is required for freeing requests
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ lockdep_assert_held(&q->sysfs_lock);
+ WARN_ON(!q->elevator);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags)
+ blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+ }
+}
+
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;