author     Christoph Hellwig <hch@lst.de>   2018-12-02 17:46:26 +0100
committer  Jens Axboe <axboe@kernel.dk>     2018-12-04 11:38:19 -0700
commit     529262d56dbebe6a26df5d2fd24cc0e1bc8579e5 (patch)
tree       6040d2d4f2faa36d324559ea7fd2916b73130e1c /block
parent     9d6610b76fa374eae3deb93bcbace4a06c2e3b95 (diff)
block: remove ->poll_fn
This was intended to support users like nvme multipath, but is just
getting in the way and adding another indirect call.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
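As a side note on the "another indirect call" rationale: the removed hook is the usual per-object function pointer dispatch. Below is a tiny standalone sketch in plain userspace C with entirely made-up names (struct queue, mq_poll, poll_indirect and poll_direct are illustrative, not kernel definitions) that shows the shape of the change: the generic entry point stops bouncing through a per-queue pointer and becomes the implementation itself.

#include <stdio.h>

/* Made-up stand-ins for the request queue and its polling hook. */
struct queue {
	int (*poll_fn)(struct queue *q, int cookie);	/* the indirection being removed */
};

/* Pretend poll implementation, standing in for the blk-mq one. */
static int mq_poll(struct queue *q, int cookie)
{
	(void)q;
	return cookie > 0;	/* pretend one completion was found */
}

/* Before the patch: the generic entry point dispatches through the pointer. */
static int poll_indirect(struct queue *q, int cookie)
{
	if (!q->poll_fn)
		return 0;
	return q->poll_fn(q, cookie);	/* indirect call on every poll */
}

/* After the patch: the generic entry point is the implementation, one hop less. */
static int poll_direct(struct queue *q, int cookie)
{
	return mq_poll(q, cookie);	/* direct call */
}

int main(void)
{
	struct queue q = { .poll_fn = mq_poll };

	printf("indirect=%d direct=%d\n", poll_indirect(&q, 1), poll_direct(&q, 1));
	return 0;
}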
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c   23
-rw-r--r--   block/blk-mq.c     24
2 files changed, 19 insertions, 28 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index a1a5e1c14898..ad59102ee30a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1251,29 +1251,6 @@ blk_qc_t submit_bio(struct bio *bio)
EXPORT_SYMBOL(submit_bio);
/**
- * blk_poll - poll for IO completions
- * @q: the queue
- * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
- *
- * Description:
- * Poll for completions on the passed in queue. Returns number of
- * completed entries found. If @spin is true, then blk_poll will continue
- * looping until at least one completion is found, unless the task is
- * otherwise marked running (or we need to reschedule).
- */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
-{
- if (!q->poll_fn || !blk_qc_t_valid(cookie))
- return 0;
-
- if (current->plug)
- blk_flush_plug_list(current->plug, false);
- return q->poll_fn(q, cookie, spin);
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
-/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for new the queue limits
* @q: the queue
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e09d7f500077..50d529602e05 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -38,7 +38,6 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -2838,8 +2837,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
spin_lock_init(&q->requeue_lock);
blk_queue_make_request(q, blk_mq_make_request);
- if (q->mq_ops->poll)
- q->poll_fn = blk_mq_poll;
/*
* Do this after blk_queue_make_request() overrides it...
@@ -3400,14 +3397,30 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
return blk_mq_poll_hybrid_sleep(q, hctx, rq);
}
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+/**
+ * blk_poll - poll for IO completions
+ * @q: the queue
+ * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
+ *
+ * Description:
+ * Poll for completions on the passed in queue. Returns number of
+ * completed entries found. If @spin is true, then blk_poll will continue
+ * looping until at least one completion is found, unless the task is
+ * otherwise marked running (or we need to reschedule).
+ */
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
struct blk_mq_hw_ctx *hctx;
long state;
- if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (!blk_qc_t_valid(cookie) ||
+ !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
+ if (current->plug)
+ blk_flush_plug_list(current->plug, false);
+
hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
/*
@@ -3448,6 +3461,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
__set_current_state(TASK_RUNNING);
return 0;
}
+EXPORT_SYMBOL_GPL(blk_poll);
unsigned int blk_mq_rq_cpu(struct request *rq)
{
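For orientation on how the exported blk_poll() is consumed after this change, here is a hedged fragment of the submit-then-poll pattern. Only the submit_bio() and blk_poll() signatures are taken from the patch above; the surrounding loop, the queue variable and the completion check are illustrative and not code from this commit.

/* Illustrative fragment, kernel context assumed; not code from this commit. */
blk_qc_t cookie = submit_bio(bio);	/* submission hands back a poll cookie */

while (!io_is_done(bio)) {		/* io_is_done() is a hypothetical check */
	/*
	 * blk_poll() is now the blk-mq poll loop itself: it validates the
	 * cookie, checks QUEUE_FLAG_POLL, flushes the current plug and maps
	 * the cookie to a hardware queue, with no ->poll_fn hop in between.
	 */
	if (blk_poll(q, cookie, true) <= 0)	/* spin=true: busy-poll */
		break;		/* give up polling; wait for the IRQ path instead */
}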