Diffstat (limited to 'block/kyber-iosched.c')
-rw-r--r--	block/kyber-iosched.c	76
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b05357bced99..c1b36ffd19ce 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -8,7 +8,6 @@
 
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
-#include <linux/blk-mq.h>
 #include <linux/module.h>
 #include <linux/sbitmap.h>
 
@@ -19,7 +18,6 @@
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
-#include "blk-mq-tag.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kyber.h>
@@ -159,10 +157,7 @@ struct kyber_queue_data {
 	 */
 	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 
-	/*
-	 * Async request percentage, converted to per-word depth for
-	 * sbitmap_get_shallow().
-	 */
+	/* Number of allowed async requests. */
 	unsigned int async_depth;
 
 	struct kyber_cpu_latency __percpu *cpu_latency;
@@ -278,7 +273,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
 
 static void kyber_timer_fn(struct timer_list *t)
 {
-	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
+	struct kyber_queue_data *kqd = timer_container_of(kqd, t, timer);
 	unsigned int sched_domain;
 	int cpu;
 	bool bad = false;
@@ -404,38 +399,52 @@ err:
 	return ERR_PTR(ret);
 }
 
-static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
+static void kyber_depth_updated(struct request_queue *q)
 {
-	struct kyber_queue_data *kqd;
-	struct elevator_queue *eq;
-
-	eq = elevator_alloc(q, e);
-	if (!eq)
-		return -ENOMEM;
+	struct kyber_queue_data *kqd = q->elevator->elevator_data;
 
-	kqd = kyber_queue_data_alloc(q);
-	if (IS_ERR(kqd)) {
-		kobject_put(&eq->kobj);
-		return PTR_ERR(kqd);
-	}
+	kqd->async_depth = q->nr_requests * KYBER_ASYNC_PERCENT / 100U;
+	blk_mq_set_min_shallow_depth(q, kqd->async_depth);
+}
 
+static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
+{
 	blk_stat_enable_accounting(q);
 	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
-	eq->elevator_data = kqd;
 	q->elevator = eq;
+	kyber_depth_updated(q);
 
 	return 0;
 }
 
+static void *kyber_alloc_sched_data(struct request_queue *q)
+{
+	struct kyber_queue_data *kqd;
+
+	kqd = kyber_queue_data_alloc(q);
+	if (IS_ERR(kqd))
+		return NULL;
+
+	return kqd;
+}
+
 static void kyber_exit_sched(struct elevator_queue *e)
 {
 	struct kyber_queue_data *kqd = e->elevator_data;
-	int i;
 
-	del_timer_sync(&kqd->timer);
+	timer_shutdown_sync(&kqd->timer);
 	blk_stat_disable_accounting(kqd->q);
+}
+
+static void kyber_free_sched_data(void *elv_data)
+{
+	struct kyber_queue_data *kqd = elv_data;
+	int i;
+
+	if (!kqd)
+		return;
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
 		sbitmap_queue_free(&kqd->domain_tokens[i]);
 
 	free_percpu(kqd->cpu_latency);
@@ -452,17 +461,6 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 		INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
-{
-	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
-	struct blk_mq_tags *tags = hctx->sched_tags;
-	unsigned int shift = tags->bitmap_tags.sb.shift;
-
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
-}
-
 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
 	struct kyber_hctx_data *khd;
@@ -507,7 +505,6 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	khd->batching = 0;
 
 	hctx->sched_data = khd;
-	kyber_depth_updated(hctx);
 
 	return 0;
 
@@ -570,7 +567,7 @@ static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
@@ -590,7 +587,8 @@ static void kyber_prepare_request(struct request *rq)
 }
 
 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct list_head *rq_list, bool at_head)
+				  struct list_head *rq_list,
+				  blk_insert_t flags)
 {
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct request *rq, *next;
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 		spin_lock(&kcq->lock);
 		trace_block_rq_insert(rq);
-		if (at_head)
+		if (flags & BLK_MQ_INSERT_AT_HEAD)
 			list_move(&rq->queuelist, head);
 		else
 			list_move_tail(&rq->queuelist, head);
@@ -890,7 +888,7 @@ KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
 #undef KYBER_LAT_SHOW_STORE
 
 #define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
-static struct elv_fs_entry kyber_sched_attrs[] = {
+static const struct elv_fs_entry kyber_sched_attrs[] = {
 	KYBER_LAT_ATTR(read),
 	KYBER_LAT_ATTR(write),
 	__ATTR_NULL
@@ -1018,6 +1016,8 @@ static struct elevator_type kyber_sched = {
 	.exit_sched = kyber_exit_sched,
 	.init_hctx = kyber_init_hctx,
 	.exit_hctx = kyber_exit_hctx,
+	.alloc_sched_data = kyber_alloc_sched_data,
+	.free_sched_data = kyber_free_sched_data,
 	.limit_depth = kyber_limit_depth,
 	.bio_merge = kyber_bio_merge,
 	.prepare_request = kyber_prepare_request,
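A note on the async_depth change above: the removed per-hctx kyber_depth_updated() derived async_depth from the size of a single sbitmap word (1U << sb.shift), so the value depended on tag-map layout, while the new per-queue version takes the percentage of q->nr_requests directly. A minimal user-space sketch contrasting the two formulas; the shift and nr_requests values are made-up examples, and KYBER_ASYNC_PERCENT is the constant defined in kyber-iosched.c (75):

	#include <stdio.h>

	#define KYBER_ASYNC_PERCENT 75	/* value from kyber-iosched.c */

	int main(void)
	{
		/* Old: per-word depth, tied to the sbitmap word size. */
		unsigned int shift = 6;		/* hypothetical sb.shift (64-bit word) */
		unsigned int old_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

		/* New: a direct fraction of the queue's request count. */
		unsigned int nr_requests = 256;	/* hypothetical q->nr_requests */
		unsigned int new_depth = nr_requests * KYBER_ASYNC_PERCENT / 100U;

		printf("old per-word async_depth: %u\n", old_depth);	/* 48 */
		printf("new per-queue async_depth: %u\n", new_depth);	/* 192 */
		return 0;
	}

The old value (48 here) was a per-word shallow depth handed to sbitmap_queue_min_shallow_depth(); the new value scales with the queue depth passed to blk_mq_set_min_shallow_depth(), matching the rewritten comment "Number of allowed async requests."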

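The from_timer() to timer_container_of() switch in kyber_timer_fn() is a rename of the same container_of()-based accessor: given the timer_list pointer passed to the callback, it recovers the kyber_queue_data that embeds it. A self-contained user-space sketch of the pattern; stand-in types rather than the kernel headers, and it relies on the GCC/Clang typeof extension:

	#include <stddef.h>
	#include <stdio.h>

	struct timer_list { int dummy; };	/* stand-in for the kernel type */

	/* Same shape as the kernel macro: step back from the embedded
	 * timer_list member to the enclosing structure. */
	#define timer_container_of(var, callback_timer, timer_fieldname)	\
		((typeof(var))((char *)(callback_timer) -			\
			       offsetof(typeof(*(var)), timer_fieldname)))

	struct kyber_queue_data {	/* reduced to two fields for the demo */
		unsigned int async_depth;
		struct timer_list timer;
	};

	static void timer_fn(struct timer_list *t)
	{
		/* The same line the hunk patches in kyber_timer_fn(). */
		struct kyber_queue_data *kqd = timer_container_of(kqd, t, timer);

		printf("async_depth = %u\n", kqd->async_depth);	/* prints 192 */
	}

	int main(void)
	{
		struct kyber_queue_data kqd = { .async_depth = 192 };

		timer_fn(&kqd.timer);	/* the callback sees only the timer */
		return 0;
	}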