author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 10:19:16 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 10:19:16 -0800
commit     36869cb93d36269f34800b3384ba7991060a69cf
tree       1ff266dcb3386bb1403494aa89647a96fd2396cd  /block/blk-sysfs.c
parent     9439b3710df688d853eb6cb4851256f2c92b1797
parent     7cd54aa8438947602cf68eda1db327822b9b8e6b
Merge branch 'for-4.10/block' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
 "This is the main block pull request this series. Contrary to previous
  releases, I've kept the core and driver changes in the same branch. We
  always ended up having dependencies between the two for obvious
  reasons, so it makes more sense to keep them together. That said, I'll
  probably try and keep more topical branches going forward, especially
  for cycles that end up being as busy as this one.

  The major parts of this pull request are:

   - Improved support for O_DIRECT on block devices, with a small
     private implementation instead of using the pig that is
     fs/direct-io.c. From Christoph.

   - Request completion tracking in a scalable fashion. This is
     utilized by two components in this pull, the new hybrid polling
     and the writeback queue throttling code.

   - Improved support for polling with O_DIRECT, adding a hybrid mode
     that combines pure polling with an initial sleep. From me.

   - Support for automatic throttling of writeback queues on the block
     side. This uses feedback from the device completion latencies to
     scale the queue on the block side up or down. From me.

   - Support for SMR drives in the block layer and for SD. From Hannes
     and Shaun.

   - Multi-connection support for nbd. From Josef.

   - Cleanup of request and bio flags, so we have a clear split between
     which are bio (or rq) private, and which ones are shared. From
     Christoph.

   - A set of patches from Bart, that improve how we handle queue
     stopping and starting in blk-mq.

   - Support for WRITE_ZEROES from Chaitanya.

   - Lightnvm updates from Javier/Matias.

   - Support for FC for the nvme-over-fabrics code. From James Smart.

   - A bunch of fixes from a whole slew of people, too many to name
     here"

* 'for-4.10/block' of git://git.kernel.dk/linux-block: (182 commits)
  blk-stat: fix a few cases of missing batch flushing
  blk-flush: run the queue when inserting blk-mq flush
  elevator: make the rqhash helpers exported
  blk-mq: abstract out blk_mq_dispatch_rq_list() helper
  blk-mq: add blk_mq_start_stopped_hw_queue()
  block: improve handling of the magic discard payload
  blk-wbt: don't throttle discard or write zeroes
  nbd: use dev_err_ratelimited in io path
  nbd: reset the setup task for NBD_CLEAR_SOCK
  nvme-fabrics: Add FC LLDD loopback driver to test FC-NVME
  nvme-fabrics: Add target support for FC transport
  nvme-fabrics: Add host support for FC transport
  nvme-fabrics: Add FC transport LLDD api definitions
  nvme-fabrics: Add FC transport FC-NVME definitions
  nvme-fabrics: Add FC transport error codes to nvme.h
  Add type 0x28 NVME type code to scsi fc headers
  nvme-fabrics: patch target code in prep for FC transport support
  nvme-fabrics: set sqe.command_id in core not transports
  parser: add u64 number parser
  nvme-rdma: align to generic ib_event logging helper
  ...
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c | 190
1 file changed, 190 insertions(+), 0 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ea374e820775..1dbce057592d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-wbt.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+ int err;
+ s64 v;
+
+ err = kstrtos64(page, 10, &v);
+ if (err < 0)
+ return err;
+
+ *var = v;
+ return 0;
+}
+
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -130,6 +144,11 @@ static ssize_t queue_physical_block_size_show(struct request_queue *q, char *pag
return queue_var_show(queue_physical_block_size(q), page);
}
+static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->limits.chunk_sectors, page);
+}
+
static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_io_min(q), page);
@@ -192,6 +211,11 @@ static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
(unsigned long long)q->limits.max_write_same_sectors << 9);
}
+static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
+}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
@@ -258,6 +282,18 @@ QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
+static ssize_t queue_zoned_show(struct request_queue *q, char *page)
+{
+ switch (blk_queue_zoned_model(q)) {
+ case BLK_ZONED_HA:
+ return sprintf(page, "host-aware\n");
+ case BLK_ZONED_HM:
+ return sprintf(page, "host-managed\n");
+ default:
+ return sprintf(page, "none\n");
+ }
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -320,6 +356,38 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret;
}
+static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
+{
+ int val;
+
+ if (q->poll_nsec == -1)
+ val = -1;
+ else
+ val = q->poll_nsec / 1000;
+
+ return sprintf(page, "%d\n", val);
+}
+
+static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ int err, val;
+
+ if (!q->mq_ops || !q->mq_ops->poll)
+ return -EINVAL;
+
+ err = kstrtoint(page, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (val == -1)
+ q->poll_nsec = -1;
+ else
+ q->poll_nsec = val * 1000;
+
+ return count;
+}
+
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
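The io_poll_delay attribute wired up above takes -1 for classic polling
(no initial sleep), 0 for the hybrid mode where the kernel estimates the
sleep time itself, or a fixed delay in microseconds (stored internally in
nanoseconds, hence the val * 1000 conversion). A minimal userspace sketch
of switching a queue to hybrid polling; the device name (nvme0n1) is an
assumption for illustration:

  #include <stdio.h>

  int main(void)
  {
          /* device path is illustrative; substitute your own queue dir */
          const char *path = "/sys/block/nvme0n1/queue/io_poll_delay";
          FILE *f = fopen(path, "w");

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          /* -1: classic poll, 0: kernel-estimated sleep, >0: fixed usec */
          fprintf(f, "0\n");
          return fclose(f) ? 1 : 0;
  }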
@@ -348,6 +416,50 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+ if (!q->rq_wb)
+ return -EINVAL;
+
+ return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ struct rq_wb *rwb;
+ ssize_t ret;
+ s64 val;
+
+ ret = queue_var_store64(&val, page);
+ if (ret < 0)
+ return ret;
+ if (val < -1)
+ return -EINVAL;
+
+ rwb = q->rq_wb;
+ if (!rwb) {
+ ret = wbt_init(q);
+ if (ret)
+ return ret;
+
+ rwb = q->rq_wb;
+ if (!rwb)
+ return -EINVAL;
+ }
+
+ if (val == -1)
+ rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+ else if (val >= 0)
+ rwb->min_lat_nsec = val * 1000ULL;
+
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ rwb->enable_state = WBT_STATE_ON_MANUAL;
+
+ wbt_update_limits(rwb);
+ return count;
+}
+
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
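queue_wb_lat_store above takes the writeback throttling latency target in
microseconds (val * 1000ULL nanoseconds internally); writing -1 restores
the per-queue default from wbt_default_latency_nsec(), and any manual
write moves enable_state to WBT_STATE_ON_MANUAL. A hedged sketch of
setting and reading back the target from userspace; the device path and
the 2000 usec value are illustrative only:

  #include <stdio.h>

  /* Write a string to a sysfs attribute; returns 0 on success. */
  static int sysfs_write(const char *path, const char *val)
  {
          FILE *f = fopen(path, "w");
          int ok;

          if (!f)
                  return -1;
          ok = fputs(val, f) != EOF;
          return (fclose(f) == 0 && ok) ? 0 : -1;
  }

  int main(void)
  {
          /* device name is an assumption for illustration */
          const char *lat = "/sys/block/sda/queue/wbt_lat_usec";
          char buf[64];
          FILE *f;

          if (sysfs_write(lat, "2000\n")) {   /* 2 msec target */
                  perror(lat);
                  return 1;
          }
          f = fopen(lat, "r");                /* read back effective value */
          if (f && fgets(buf, sizeof(buf), f))
                  printf("wbt_lat_usec = %s", buf);
          if (f)
                  fclose(f);
          return 0;
  }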
@@ -385,6 +497,26 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_dax(q), page);
}
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+ return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+ pre, (long long) stat->nr_samples,
+ (long long) stat->mean, (long long) stat->min,
+ (long long) stat->max);
+}
+
+static ssize_t queue_stats_show(struct request_queue *q, char *page)
+{
+ struct blk_rq_stat stat[2];
+ ssize_t ret;
+
+ blk_queue_stat_get(q, stat);
+
+ ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+ ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+ return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
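Per print_stat() above, reading the new stats attribute yields one
formatted line per direction, e.g. "read : samples=..., mean=..., min=...,
max=...". A small sketch that dumps it together with the other read-only
attributes added in this diff (zoned, chunk_sectors); the device path is
again an assumption for illustration:

  #include <stdio.h>

  static void dump_attr(const char *path)
  {
          char line[256];
          FILE *f = fopen(path, "r");

          if (!f) {
                  perror(path);
                  return;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
  }

  int main(void)
  {
          /* device name is an assumption for illustration */
          dump_attr("/sys/block/sda/queue/stats");
          dump_attr("/sys/block/sda/queue/zoned");
          dump_attr("/sys/block/sda/queue/chunk_sectors");
          return 0;
  }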
@@ -444,6 +576,11 @@ static struct queue_sysfs_entry queue_physical_block_size_entry = {
.show = queue_physical_block_size_show,
};
+static struct queue_sysfs_entry queue_chunk_sectors_entry = {
+ .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
+ .show = queue_chunk_sectors_show,
+};
+
static struct queue_sysfs_entry queue_io_min_entry = {
.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
.show = queue_io_min_show,
@@ -480,12 +617,22 @@ static struct queue_sysfs_entry queue_write_same_max_entry = {
.show = queue_write_same_max_show,
};
+static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
+ .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
+ .show = queue_write_zeroes_max_show,
+};
+
static struct queue_sysfs_entry queue_nonrot_entry = {
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_nonrot,
.store = queue_store_nonrot,
};
+static struct queue_sysfs_entry queue_zoned_entry = {
+ .attr = {.name = "zoned", .mode = S_IRUGO },
+ .show = queue_zoned_show,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
.show = queue_nomerges_show,
@@ -516,6 +663,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.store = queue_poll_store,
};
+static struct queue_sysfs_entry queue_poll_delay_entry = {
+ .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_poll_delay_show,
+ .store = queue_poll_delay_store,
+};
+
static struct queue_sysfs_entry queue_wc_entry = {
.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
.show = queue_wc_show,
@@ -527,6 +680,17 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_stats_entry = {
+ .attr = {.name = "stats", .mode = S_IRUGO },
+ .show = queue_stats_show,
+};
+
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+ .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_wb_lat_show,
+ .store = queue_wb_lat_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -539,6 +703,7 @@ static struct attribute *default_attrs[] = {
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
+ &queue_chunk_sectors_entry.attr,
&queue_io_min_entry.attr,
&queue_io_opt_entry.attr,
&queue_discard_granularity_entry.attr,
@@ -546,7 +711,9 @@ static struct attribute *default_attrs[] = {
&queue_discard_max_hw_entry.attr,
&queue_discard_zeroes_data_entry.attr,
&queue_write_same_max_entry.attr,
+ &queue_write_zeroes_max_entry.attr,
&queue_nonrot_entry.attr,
+ &queue_zoned_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
@@ -554,6 +721,9 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_dax_entry.attr,
+ &queue_stats_entry.attr,
+ &queue_wb_lat_entry.attr,
+ &queue_poll_delay_entry.attr,
NULL,
};
@@ -628,6 +798,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
+ wbt_exit(q);
bdi_exit(&q->backing_dev_info);
blkcg_exit_queue(q);
@@ -668,6 +839,23 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+static void blk_wb_init(struct request_queue *q)
+{
+#ifndef CONFIG_BLK_WBT_MQ
+ if (q->mq_ops)
+ return;
+#endif
+#ifndef CONFIG_BLK_WBT_SQ
+ if (q->request_fn)
+ return;
+#endif
+
+ /*
+ * If this fails, we don't get throttling
+ */
+ wbt_init(q);
+}
+
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -707,6 +895,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_dev(dev, q);
+ blk_wb_init(q);
+
if (!q->request_fn)
return 0;