author    Christoph Hellwig <hch@lst.de>  2017-06-19 09:26:24 +0200
committer Jens Axboe <axboe@kernel.dk>    2017-06-27 12:13:45 -0600
commit    46685d1a9521054fa3a7a352f6bb54166cd5b2c5 (patch)
tree      4da41c3661d2fc577ff72d36ab53e64c8d803c62
parent    0b0bcacc3b4300c4bba0bacb4c7a279b2728f331 (diff)
blk-mq: don't bounce by default
For historical reasons we default to bouncing highmem pages for all block queues. But the blk-mq drivers are easy to audit to ensure that we don't need this: scsi and mtip32xx set explicit limits, and everyone else has no particular ones.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
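The message above points at drivers that set explicit limits instead of relying on the removed default. As a rough sketch only (the "example_" names below are invented for illustration; only blk_mq_init_queue(), blk_queue_bounce_limit() and BLK_BOUNCE_HIGH are real kernel APIs of this era), a blk-mq driver that still cannot DMA to highmem would now opt back in to bouncing itself:

/*
 * Sketch only: the example_* names are hypothetical and not part of this
 * patch or of any in-tree driver.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

static int example_probe(struct blk_mq_tag_set *set)
{
	struct request_queue *q = blk_mq_init_queue(set);

	if (IS_ERR(q))
		return PTR_ERR(q);

	/*
	 * The queue no longer bounces highmem pages by default, so a
	 * driver that cannot DMA above lowmem must ask for bouncing
	 * explicitly; this is what the removed BLK_BOUNCE_HIGH default
	 * used to do for every blk-mq queue.
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	return 0;
}

The drivers touched below go the other way: virtio_blk and xen-blkfront can drop their BLK_BOUNCE_ANY calls entirely, because "no bounce limit" is now the default.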
-rw-r--r--  block/blk-mq.c                5
-rw-r--r--  drivers/block/virtio_blk.c    3
-rw-r--r--  drivers/block/xen-blkfront.c  3
3 files changed, 0 insertions, 11 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 41e3aeb51c9a..05dfa3f270ae 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2350,11 +2350,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_make_request(q, blk_mq_make_request);
 
 	/*
-	 * by default assume old behaviour and bounce for any highmem page
-	 */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
 	q->nr_requests = set->queue_depth;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e59bd4549a8a..0297ad7c1452 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -720,9 +720,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* We can handle whatever the host told us to handle. */
 	blk_queue_max_segments(q, vblk->sg_elems-2);
 
-	/* No need to bounce any requests */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-
 	/* No real sector limit. */
 	blk_queue_max_hw_sectors(q, -1U);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ac90093fcb25..c852ed3c01d5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -953,9 +953,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
 }
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,