author	Jens Axboe <axboe@kernel.dk>	2021-10-18 10:12:12 -0600
committer	Jens Axboe <axboe@kernel.dk>	2021-10-19 09:21:42 -0600
commit	bc490f81731e181b07b8d7577425c06ae91692c8 (patch)
tree	abe26a3d3c309e8a5d62e61e50ccd8a13036aebb /include/linux/blkdev.h
parent	480d42dc001bbfe953825a92073012fcd5a99161 (diff)
block: change plugging to use a singly linked list
Use a singly linked list for the blk_plug. This saves 8 bytes in the
blk_plug struct, and makes for faster list manipulations than doubly
linked lists. As we don't use the doubly linked lists for anything,
singly linked is just fine.

This yields a bump in default (merging enabled) performance from 7.0
to 7.1M IOPS, and ~7.5M IOPS with merging disabled.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
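A minimal userspace sketch of the idea, not the kernel code: a singly
linked list needs only one head pointer in the plug and one forward
pointer per request, where a doubly linked list_head costs two pointers
on each side. The struct layouts and helper names below (rq_next,
plug_add_rq, plug_pop_rq) are illustrative assumptions, not the
interfaces this patch adds.

	#include <stddef.h>
	#include <stdio.h>

	struct request {
		int tag;
		struct request *rq_next;	/* single forward link */
	};

	struct blk_plug {
		struct request *mq_list;	/* head pointer only: 8 bytes */
	};

	/* Push onto the head: O(1), no back-link to maintain. */
	static void plug_add_rq(struct blk_plug *plug, struct request *rq)
	{
		rq->rq_next = plug->mq_list;
		plug->mq_list = rq;
	}

	/* Pop from the head: O(1). */
	static struct request *plug_pop_rq(struct blk_plug *plug)
	{
		struct request *rq = plug->mq_list;

		if (rq)
			plug->mq_list = rq->rq_next;
		return rq;
	}

	int main(void)
	{
		struct blk_plug plug = { .mq_list = NULL };
		struct request a = { .tag = 1 }, b = { .tag = 2 };
		struct request *rq;

		plug_add_rq(&plug, &a);
		plug_add_rq(&plug, &b);

		/* Emptiness is a plain NULL test on the head pointer. */
		while ((rq = plug_pop_rq(&plug)))
			printf("flushing request %d\n", rq->tag);
		return 0;
	}

The NULL-test emptiness check is what the blk_needs_flush_plug() hunk
in the diff below switches to, replacing list_empty(&plug->mq_list).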
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fd9771a1da09..4027112b9851 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -728,7 +728,7 @@ extern void blk_set_queue_dying(struct request_queue *);
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	struct list_head mq_list; /* blk-mq requests */
+	struct request *mq_list; /* blk-mq requests */
 
 	/* if ios_left is > 1, we can batch tag/rq allocations */
 	struct request *cached_rq;
@@ -777,8 +777,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	struct blk_plug *plug = tsk->plug;
 
 	return plug &&
-		(!list_empty(&plug->mq_list) ||
-		 !list_empty(&plug->cb_list));
+		(plug->mq_list || !list_empty(&plug->cb_list));
 }
 
 int blkdev_issue_flush(struct block_device *bdev);