Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--   block/blk-mq-sched.h   115
1 file changed, 84 insertions, 31 deletions
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5246ae040704..02c40a72e959 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -2,71 +2,113 @@
 #ifndef BLK_MQ_SCHED_H
 #define BLK_MQ_SCHED_H
 
+#include "elevator.h"
 #include "blk-mq.h"
-#include "blk-mq-tag.h"
 
-#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
-
-void blk_mq_sched_assign_ioc(struct request *rq);
+#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
 		struct list_head *free);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
-void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
-
-void blk_mq_sched_insert_request(struct request *rq, bool at_head,
-		bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_ctx *ctx,
-		struct list_head *list, bool run_queue_async);
+void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
-int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+		struct elevator_resources *res);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
-void blk_mq_sched_free_requests(struct request_queue *q);
+void blk_mq_sched_free_rqs(struct request_queue *q);
+
+struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+		unsigned int nr_hw_queues, unsigned int nr_requests);
+int blk_mq_alloc_sched_res(struct request_queue *q,
+		struct elevator_type *type,
+		struct elevator_resources *res,
+		unsigned int nr_hw_queues);
+int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set);
+void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
+void blk_mq_free_sched_tags(struct elevator_tags *et,
+		struct blk_mq_tag_set *set);
+void blk_mq_free_sched_res(struct elevator_resources *res,
+		struct elevator_type *type,
+		struct blk_mq_tag_set *set);
+void blk_mq_free_sched_res_batch(struct xarray *et_table,
+		struct blk_mq_tag_set *set);
+/*
+ * blk_mq_alloc_sched_data() - Allocates scheduler specific data
+ * Returns:
+ * - Pointer to allocated data on success
+ * - NULL if no allocation needed
+ * - ERR_PTR(-ENOMEM) in case of failure
+ */
+static inline void *blk_mq_alloc_sched_data(struct request_queue *q,
+		struct elevator_type *e)
+{
+	void *sched_data;
 
-static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs)
+	if (!e || !e->ops.alloc_sched_data)
+		return NULL;
+
+	sched_data = e->ops.alloc_sched_data(q);
+	return (sched_data) ?: ERR_PTR(-ENOMEM);
+}
+
+static inline void blk_mq_free_sched_data(struct elevator_type *e, void *data)
 {
-	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
-		return false;
+	if (e && e->ops.free_sched_data)
+		e->ops.free_sched_data(data);
+}
 
-	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
+static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		__blk_mq_sched_restart(hctx);
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
 }
 
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 		struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.allow_merge)
-		return e->type->ops.allow_merge(q, rq, bio);
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		struct elevator_queue *e = q->elevator;
+		if (e->type->ops.allow_merge)
+			return e->type->ops.allow_merge(q, rq, bio);
+	}
 
 	return true;
 }
 
 static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
-	struct elevator_queue *e = rq->q->elevator;
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		struct elevator_queue *e = rq->q->elevator;
 
-	if (e && e->type->ops.completed_request)
-		e->type->ops.completed_request(rq, now);
+		if (e->type->ops.completed_request)
+			e->type->ops.completed_request(rq, now);
+	}
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
 {
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		struct request_queue *q = rq->q;
+		struct elevator_queue *e = q->elevator;
 
-	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
-		e->type->ops.requeue_request(rq);
+		if (e->type->ops.requeue_request)
+			e->type->ops.requeue_request(rq);
+	}
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
@@ -84,4 +126,15 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
+static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
+		unsigned int depth)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
+				depth);
+}
+
 #endif
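
Note: the blk_mq_alloc_sched_data()/blk_mq_free_sched_data() helpers added above call two elevator ops, alloc_sched_data and free_sched_data, whose implementations live in the individual I/O schedulers rather than in this header. The sketch below shows how a scheduler might provide them, assuming only the contract visible in the diff (the alloc op receives the request_queue and returns the allocated data or NULL on failure; the free op takes the opaque pointer back). The names example_sched_data, example_alloc_sched_data and example_free_sched_data are hypothetical and do not appear in this patch.

/*
 * Hypothetical scheduler-side hooks for the alloc_sched_data /
 * free_sched_data ops referenced by blk_mq_alloc_sched_data() above.
 * Names are illustrative only; they are not taken from this diff.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/blkdev.h>

struct example_sched_data {
	spinlock_t lock;		/* protects @pending */
	struct list_head pending;	/* requests not yet dispatched */
};

static void *example_alloc_sched_data(struct request_queue *q)
{
	struct example_sched_data *sd;

	/*
	 * Allocate on the queue's home NUMA node. Returning NULL signals
	 * failure; blk_mq_alloc_sched_data() maps that to ERR_PTR(-ENOMEM).
	 */
	sd = kzalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (!sd)
		return NULL;

	spin_lock_init(&sd->lock);
	INIT_LIST_HEAD(&sd->pending);
	return sd;
}

static void example_free_sched_data(void *data)
{
	kfree(data);	/* kfree(NULL) is a no-op, so no check is needed */
}

A scheduler that needs no per-queue data can simply leave both ops unset; blk_mq_alloc_sched_data() then returns NULL, which per the comment in the diff means "no allocation needed".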
