author    Jens Axboe <axboe@fb.com>    2015-11-05 10:44:55 -0700
committer Jens Axboe <axboe@fb.com>    2015-11-07 10:40:47 -0700
commit    05229beeddf7e75e2e616ddaad4b70e7fca9528d (patch)
tree      2f8c5efffcdba6f6fd113960da9009979cf679e4 /block
parent    7b371636fb6d187873d9d2730c2b1febc48a9b47 (diff)
block: add block polling support
Add basic support for polling for specific IO to complete. This uses
the cookie that blk-mq passes back, which enables the block layer to
pass this cookie to the driver to spin for a specific request.

This will be combined with request latency tracking, so we can make
qualified decisions about when to poll and when not to. For now, for
benchmark purposes, we add a sysfs file that controls whether polling
is enabled or not.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
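As a rough caller-side sketch of the intended flow (not part of this patch):
a synchronous submitter keeps the cookie that blk-mq hands back at submission
time and spins it through blk_poll(), falling back to sleeping whenever
blk_poll() returns false. The function poll_for_completion_sketch(), the
submit_and_get_cookie() helper and the *done flag below are purely
hypothetical stand-ins for the real submission/completion plumbing.

/*
 * Hypothetical caller-side sketch, not part of this patch.
 */
#include <linux/blkdev.h>
#include <linux/sched.h>

static void poll_for_completion_sketch(struct request_queue *q,
				       struct bio *bio, bool *done)
{
	/* blk-mq hands back a cookie identifying the hw queue and tag */
	blk_qc_t cookie = submit_and_get_cookie(bio);	/* hypothetical helper */

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		/*
		 * blk_poll() spins the driver's ->poll() hook for this
		 * specific cookie; if it returns false (polling disabled,
		 * unsupported, or we should reschedule), fall back to
		 * sleeping until the interrupt-driven completion wakes us.
		 */
		if (!blk_poll(q, cookie))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

The per-hctx io_poll counters added below (invoked/success) then show how
often such a spin loop actually caught a completion.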
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      41
-rw-r--r--  block/blk-mq-sysfs.c  10
-rw-r--r--  block/blk-sysfs.c     35
3 files changed, 86 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index e93df6d386a0..fa36b4ff7d63 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3312,6 +3312,47 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct blk_plug *plug;
+	long state;
+
+	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		return false;
+
+	plug = current->plug;
+	if (plug)
+		blk_flush_plug_list(plug, false);
+
+	state = current->state;
+	while (!need_resched()) {
+		unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
+		struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
+		int ret;
+
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
+		if (ret > 0) {
+			hctx->poll_success++;
+			set_current_state(TASK_RUNNING);
+			return true;
+		}
+
+		if (signal_pending_state(state, current))
+			set_current_state(TASK_RUNNING);
+
+		if (current->state == TASK_RUNNING)
+			return true;
+		if (ret < 0)
+			break;
+		cpu_relax();
+	}
+
+	return false;
+}
+
#ifdef CONFIG_PM
/**
* blk_pm_runtime_init - Block layer runtime PM initialization routine
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6f57a110289c..1cf18784c5cf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -174,6 +174,11 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
return ret;
}
+static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
+}
+
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
char *page)
{
@@ -295,6 +300,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
.attr = {.name = "cpu_list", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_cpus_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
+ .attr = {.name = "io_poll", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_poll_show,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -304,6 +313,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_tags.attr,
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
+ &blk_mq_hw_sysfs_poll.attr,
NULL,
};
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 31849e328b45..565b8dac5782 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -317,6 +317,34 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret;
}
+static ssize_t queue_poll_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
+}
+
+static ssize_t queue_poll_store(struct request_queue *q, const char *page,
+				size_t count)
+{
+	unsigned long poll_on;
+	ssize_t ret;
+
+	if (!q->mq_ops || !q->mq_ops->poll)
+		return -EINVAL;
+
+	ret = queue_var_store(&poll_on, page, count);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irq(q->queue_lock);
+	if (poll_on)
+		queue_flag_set(QUEUE_FLAG_POLL, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_POLL, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -442,6 +470,12 @@ static struct queue_sysfs_entry queue_random_entry = {
.store = queue_store_random,
};
+static struct queue_sysfs_entry queue_poll_entry = {
+ .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_poll_show,
+ .store = queue_poll_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -466,6 +500,7 @@ static struct attribute *default_attrs[] = {
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
&queue_random_entry.attr,
+ &queue_poll_entry.attr,
NULL,
};