diff options
Diffstat (limited to 'block/blk-mq-tag.c')
| -rw-r--r-- | block/blk-mq-tag.c | 173 |
1 file changed, 69 insertions, 104 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 2cafcf11ee8b..33946cdb5716 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -8,6 +8,9 @@ */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/kmemleak.h> #include <linux/delay.h> #include "blk.h" @@ -190,8 +193,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) sbitmap_finish_wait(bt, ws, &wait); data->ctx = blk_mq_get_ctx(data->q); - data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, - data->ctx); + data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx); tags = blk_mq_tags_from_data(data); if (data->flags & BLK_MQ_REQ_RESERVED) bt = &tags->breserved_tags; @@ -254,13 +256,10 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags, unsigned int bitnr) { struct request *rq; - unsigned long flags; - spin_lock_irqsave(&tags->lock, flags); rq = tags->rqs[bitnr]; if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq)) rq = NULL; - spin_unlock_irqrestore(&tags->lock, flags); return rq; } @@ -298,15 +297,15 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) /** * bt_for_each - iterate over the requests associated with a hardware queue * @hctx: Hardware queue to examine. - * @q: Request queue to examine. + * @q: Request queue @hctx is associated with (@hctx->queue). * @bt: sbitmap to examine. This is either the breserved_tags member * or the bitmap_tags member of struct blk_mq_tags. * @fn: Pointer to the function that will be called for each request * associated with @hctx that has been assigned a driver tag. - * @fn will be called as follows: @fn(@hctx, rq, @data, @reserved) - * where rq is a pointer to a request. Return true to continue - * iterating tags, false to stop. - * @data: Will be passed as third argument to @fn. + * @fn will be called as follows: @fn(rq, @data) where rq is a + * pointer to a request. 
Return %true to continue iterating tags; + * %false to stop. + * @data: Will be passed as second argument to @fn. * @reserved: Indicates whether @bt is the breserved_tags member or the * bitmap_tags member of struct blk_mq_tags. */ @@ -372,9 +371,9 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) * @bt: sbitmap to examine. This is either the breserved_tags member * or the bitmap_tags member of struct blk_mq_tags. * @fn: Pointer to the function that will be called for each started - * request. @fn will be called as follows: @fn(rq, @data, - * @reserved) where rq is a pointer to a request. Return true - * to continue iterating tags, false to stop. + * request. @fn will be called as follows: @fn(rq, @data) where rq + * is a pointer to a request. Return %true to continue iterating + * tags; %false to stop. * @data: Will be passed as second argument to @fn. * @flags: BT_TAG_ITER_* */ @@ -407,10 +406,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, * blk_mq_all_tag_iter - iterate over all requests in a tag map * @tags: Tag map to iterate over. * @fn: Pointer to the function that will be called for each - * request. @fn will be called as follows: @fn(rq, @priv, - * reserved) where rq is a pointer to a request. 'reserved' - * indicates whether or not @rq is a reserved request. Return - * true to continue iterating tags, false to stop. + * request. @fn will be called as follows: @fn(rq, @priv) where rq + * is a pointer to a request. Return %true to continue iterating + * tags; %false to stop. * @priv: Will be passed as second argument to @fn. * * Caller has to pass the tag map from which requests are allocated. @@ -425,10 +423,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set * @tagset: Tag set to iterate over. * @fn: Pointer to the function that will be called for each started - * request. 
@fn will be called as follows: @fn(rq, @priv, - * reserved) where rq is a pointer to a request. 'reserved' - * indicates whether or not @rq is a reserved request. Return - * true to continue iterating tags, false to stop. + * request. @fn will be called as follows: @fn(rq, @priv) where + * rq is a pointer to a request. Return %true to continue iterating + * tags; %false to stop. + * @priv: Will be passed as second argument to @fn. + * + * We grab one request reference before calling @fn and release it after @@ -438,7 +435,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv) { unsigned int flags = tagset->flags; - int i, nr_tags; + int i, nr_tags, srcu_idx; + + srcu_idx = srcu_read_lock(&tagset->tags_srcu); nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues; @@ -447,6 +446,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, __blk_mq_all_tag_iter(tagset->tags[i], fn, priv, BT_TAG_ITER_STARTED); } + srcu_read_unlock(&tagset->tags_srcu, srcu_idx); } EXPORT_SYMBOL(blk_mq_tagset_busy_iter); @@ -484,11 +484,10 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request); * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag * @q: Request queue to examine. * @fn: Pointer to the function that will be called for each request - * on @q. @fn will be called as follows: @fn(hctx, rq, @priv, - * reserved) where rq is a pointer to a request and hctx points - * to the hardware queue associated with the request. 'reserved' - * indicates whether or not @rq is a reserved request. - * @priv: Will be passed as third argument to @fn. + * on @q. @fn will be called as follows: @fn(rq, @priv) where rq + * is a pointer to a request queued on @q. Return %true to continue + * iterating tags; %false to stop. + * @priv: Will be passed as second argument to @fn. 
* * Note: if @q->tag_set is shared with other request queues then @fn will be * called for all requests on all queues that share that tag set and not only @@ -497,14 +496,17 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, void *priv) { + int srcu_idx; + /* - * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table + * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx * while the queue is frozen. So we can use q_usage_counter to avoid * racing with it. */ if (!percpu_ref_tryget(&q->q_usage_counter)) return; + srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu); if (blk_mq_is_shared_tags(q->tag_set->flags)) { struct blk_mq_tags *tags = q->tag_set->shared_tags; struct sbitmap_queue *bresv = &tags->breserved_tags; @@ -534,6 +536,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, bt_for_each(hctx, q, btags, fn, priv, false); } } + srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx); blk_queue_exit(q); } @@ -544,30 +547,11 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, node); } -int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, - struct sbitmap_queue *breserved_tags, - unsigned int queue_depth, unsigned int reserved, - int node, int alloc_policy) -{ - unsigned int depth = queue_depth - reserved; - bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - - if (bt_alloc(bitmap_tags, depth, round_robin, node)) - return -ENOMEM; - if (bt_alloc(breserved_tags, reserved, round_robin, node)) - goto free_bitmap_tags; - - return 0; - -free_bitmap_tags: - sbitmap_queue_free(bitmap_tags); - return -ENOMEM; -} - struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, - unsigned int reserved_tags, - int node, int alloc_policy) + unsigned int reserved_tags, unsigned int flags, int node) { + unsigned int depth = total_tags - reserved_tags; + bool round_robin = flags & BLK_MQ_F_TAG_RR; struct blk_mq_tags 
*tags; if (total_tags > BLK_MQ_TAG_MAX) { @@ -582,73 +566,53 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; spin_lock_init(&tags->lock); + INIT_LIST_HEAD(&tags->page_list); + + if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node)) + goto out_free_tags; + if (bt_alloc(&tags->breserved_tags, reserved_tags, round_robin, node)) + goto out_free_bitmap_tags; - if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags, - total_tags, reserved_tags, node, - alloc_policy) < 0) { - kfree(tags); - return NULL; - } return tags; -} -void blk_mq_free_tags(struct blk_mq_tags *tags) -{ +out_free_bitmap_tags: sbitmap_queue_free(&tags->bitmap_tags); - sbitmap_queue_free(&tags->breserved_tags); +out_free_tags: kfree(tags); + return NULL; } -int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, - struct blk_mq_tags **tagsptr, unsigned int tdepth, - bool can_grow) +static void blk_mq_free_tags_callback(struct rcu_head *head) { - struct blk_mq_tags *tags = *tagsptr; - - if (tdepth <= tags->nr_reserved_tags) - return -EINVAL; - - /* - * If we are allowed to grow beyond the original size, allocate - * a new set of tags before freeing the old one. - */ - if (tdepth > tags->nr_tags) { - struct blk_mq_tag_set *set = hctx->queue->tag_set; - struct blk_mq_tags *new; - - if (!can_grow) - return -EINVAL; + struct blk_mq_tags *tags = container_of(head, struct blk_mq_tags, + rcu_head); + struct page *page; + while (!list_empty(&tags->page_list)) { + page = list_first_entry(&tags->page_list, struct page, lru); + list_del_init(&page->lru); /* - * We need some sort of upper limit, set it high enough that - * no valid use cases should require more. + * Remove kmemleak object previously allocated in + * blk_mq_alloc_rqs(). */ - if (tdepth > MAX_SCHED_RQ) - return -EINVAL; - - /* - * Only the sbitmap needs resizing since we allocated the max - * initially. 
- */ - if (blk_mq_is_shared_tags(set->flags)) - return 0; + kmemleak_free(page_address(page)); + __free_pages(page, page->private); + } + kfree(tags); +} - new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); - if (!new) - return -ENOMEM; +void blk_mq_free_tags(struct blk_mq_tag_set *set, struct blk_mq_tags *tags) +{ + sbitmap_queue_free(&tags->bitmap_tags); + sbitmap_queue_free(&tags->breserved_tags); - blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num); - *tagsptr = new; - } else { - /* - * Don't need (or can't) update reserved tags here, they - * remain static and should never need resizing. - */ - sbitmap_queue_resize(&tags->bitmap_tags, - tdepth - tags->nr_reserved_tags); + /* if tags pages is not allocated yet, free tags directly */ + if (list_empty(&tags->page_list)) { + kfree(tags); + return; } - return 0; + call_srcu(&set->tags_srcu, &tags->rcu_head, blk_mq_free_tags_callback); } void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size) @@ -658,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } -void blk_mq_tag_update_sched_shared_tags(struct request_queue *q) +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q, + unsigned int nr) { sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags, - q->nr_requests - q->tag_set->reserved_tags); + nr - q->tag_set->reserved_tags); } /** |
