Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c     32
-rw-r--r--  block/blk-sysfs.c  30
2 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f4f78c03f735..7d05a56e3639 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -988,13 +988,43 @@ static inline void blk_account_io_done(struct request *req, u64 now)
}
}

+static inline bool blk_rq_passthrough_stats(struct request *req)
+{
+ struct bio *bio = req->bio;
+
+ if (!blk_queue_passthrough_stat(req->q))
+ return false;
+
+ /* Requests without a bio do not transfer data. */
+ if (!bio)
+ return false;
+
+ /*
+ * Stats are accumulated in the bdev, so the bio must have one attached
+ * to track stats. Most drivers do not set the bdev for passthrough
+ * requests, but nvme is one that will set it.
+ */
+ if (!bio->bi_bdev)
+ return false;
+
+ /*
+ * We don't know what a passthrough command does, but we know the
+ * payload size and data direction. Ensuring the size is aligned to the
+ * block size filters out most commands with payloads that don't
+ * represent sector access.
+ */
+ if (blk_rq_bytes(req) & (bdev_logical_block_size(bio->bi_bdev) - 1))
+ return false;
+ return true;
+}
+
static inline void blk_account_io_start(struct request *req)
{
trace_block_io_start(req);

if (!blk_queue_io_stat(req->q))
return;
- if (blk_rq_is_passthrough(req))
+ if (blk_rq_is_passthrough(req) && !blk_rq_passthrough_stats(req))
return;

req->rq_flags |= RQF_IO_STAT;
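
The alignment check above relies on logical block sizes being powers of
two, so "bytes & (size - 1)" computes bytes % size without a division. A
standalone sketch of the same mask trick (the helper name and the sample
sizes are illustrative, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Same test as the blk_rq_bytes() check in blk_rq_passthrough_stats():
 * for a power-of-two block size, (bytes & (block_size - 1)) is the
 * remainder of bytes / block_size, so a nonzero result means the
 * payload is not a whole number of logical blocks.
 */
static bool payload_is_block_aligned(unsigned long bytes,
				     unsigned int block_size)
{
	return (bytes & (block_size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", payload_is_block_aligned(8192, 4096)); /* 1: two 4 KiB blocks */
	printf("%d\n", payload_is_block_aligned(4608, 4096)); /* 0: 4.5 KiB, not aligned */
	printf("%d\n", payload_is_block_aligned(512, 512));   /* 1: one 512 B sector */
	return 0;
}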
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8717d43e0792..741b95dfdbf6 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -272,6 +272,34 @@ static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
return queue_var_show(disk_nr_zones(disk), page);
}

+static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
+{
+ return queue_var_show(blk_queue_passthrough_stat(disk->queue), page);
+}
+
+static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
+ const char *page, size_t count)
+{
+ struct queue_limits lim;
+ unsigned long ios;
+ ssize_t ret;
+
+ ret = queue_var_store(&ios, page, count);
+ if (ret < 0)
+ return ret;
+
+ lim = queue_limits_start_update(disk->queue);
+ if (ios)
+ lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+ else
+ lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+
+ ret = queue_limits_commit_update(disk->queue, &lim);
+ if (ret)
+ return ret;
+
+ return count;
+}
static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
@@ -460,6 +488,7 @@ QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+QUEUE_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
@@ -586,6 +615,7 @@ static struct attribute *queue_attrs[] = {
&queue_max_open_zones_entry.attr,
&queue_max_active_zones_entry.attr,
&queue_nomerges_entry.attr,
+ &queue_iostats_passthrough_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
&queue_add_random_entry.attr,