author    Christoph Hellwig <hch@lst.de>    2020-09-24 08:51:34 +0200
committer Jens Axboe <axboe@kernel.dk>    2020-09-24 13:43:39 -0600
commit    c2e4cd57cfa1f627b786c764d185fff85fd12be9 (patch)
tree      7b562f0ea09b6bf6dc45c14a7938ead0daab2aee /block/blk-settings.c
parent    16ef510139315a2147ee7525796f8dbd4e4b7864 (diff)
block: lift setting the readahead size into the block layer
Drivers shouldn't really mess with the readahead size, as that is a VM concept. Instead set it based on the optimal I/O size by lifting the algorithm from the md driver when registering the disk. Also set bdi->io_pages there by applying the same scheme based on max_sectors. To ensure the limits work well for stacking drivers, a new helper is added to update the readahead limits from the block limits, which is also called from disk_stack_limits.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
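To make the sizing scheme concrete, here is a small stand-alone sketch of the same arithmetic. It is illustrative only and not part of the patch; the 4 KiB page size, the VM_READAHEAD_PAGES value of 32 pages (128 KiB), and the example io_opt/max_sectors figures are assumptions, not values taken from any particular device.

/*
 * Illustrative userspace arithmetic mirroring the new helper.
 * Assumes PAGE_SIZE = 4096 and VM_READAHEAD_PAGES = 32 (128 KiB / 4 KiB).
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_SHIFT		12
#define VM_READAHEAD_PAGES	32UL	/* 128 KiB with 4 KiB pages */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long io_opt = 1024 * 1024;	/* hypothetical 1 MiB optimal I/O size */
	unsigned long max_sectors = 2560;	/* hypothetical limit, in 512-byte sectors */

	/* ra_pages: at least twice the optimal I/O size, never below the VM default */
	unsigned long ra_pages = max_ul(io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);

	/* io_pages: max_sectors (512-byte units) converted to pages */
	unsigned long io_pages = max_sectors >> (PAGE_SHIFT - 9);

	printf("ra_pages = %lu (%lu KiB)\n", ra_pages, ra_pages * PAGE_SIZE / 1024);
	printf("io_pages = %lu (%lu KiB)\n", io_pages, io_pages * PAGE_SIZE / 1024);
	return 0;
}

With these assumed numbers the readahead window comes out to 512 pages (2 MiB, i.e. twice the 1 MiB optimal I/O size) and io_pages to 320 pages (1280 KiB).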
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--  block/blk-settings.c  18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5ea3de48afba..4f6eb4bb1723 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -372,6 +372,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
+void blk_queue_update_readahead(struct request_queue *q)
+{
+	/*
+	 * For read-ahead of large files to be effective, we need to read ahead
+	 * at least twice the optimal I/O size.
+	 */
+	q->backing_dev_info->ra_pages =
+		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
+	q->backing_dev_info->io_pages =
+		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+}
+EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
+
/**
* blk_limits_io_min - set minimum request size for a device
* @limits: the queue limits
@@ -450,6 +463,8 @@ EXPORT_SYMBOL(blk_limits_io_opt);
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
blk_limits_io_opt(&q->limits, opt);
+	q->backing_dev_info->ra_pages =
+		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
@@ -631,8 +646,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
top, bottom);
}
-	t->backing_dev_info->io_pages =
-		t->limits.max_sectors >> (PAGE_SHIFT - 9);
+	blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
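After this change, a driver that adjusts its queue limits at run time can refresh the readahead from those limits instead of poking bdi->ra_pages directly. A minimal sketch follows; the driver function and its parameter values are hypothetical, while blk_queue_io_opt(), blk_queue_max_hw_sectors() and the new blk_queue_update_readahead() are the block-layer APIs involved.

/*
 * Hypothetical driver fragment: after changing the I/O limits, recompute
 * ra_pages and io_pages from them via the new helper rather than writing
 * q->backing_dev_info->ra_pages by hand.
 */
#include <linux/blkdev.h>

static void mydrv_apply_limits(struct request_queue *q, unsigned int io_opt,
			       unsigned int max_hw_sectors)
{
	blk_queue_io_opt(q, io_opt);			/* optimal I/O size in bytes */
	blk_queue_max_hw_sectors(q, max_hw_sectors);	/* hardware limit in 512-byte sectors */
	blk_queue_update_readahead(q);			/* derive ra_pages and io_pages */
}

Stacking drivers that go through disk_stack_limits() get the same update for free, since that function now calls blk_queue_update_readahead() itself.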