author     Jens Axboe <axboe@kernel.dk>   2018-11-13 21:32:10 -0700
committer  Jens Axboe <axboe@kernel.dk>   2018-11-26 08:25:57 -0700
commit     aa61bec30eca11816789dc25c2090366b0ccfaf8 (patch)
tree       90fd99996f94dfa0cae4ee03c2fe8fce3f3329ee /block/blk-mq.c
parent     0a1b8b87d064a47fad9ec475316002da28559207 (diff)
blk-mq: ensure mq_ops ->poll() is entered at least once
Right now we immediately bail if need_resched() is true, but we need to do at least one loop in case we have entries waiting. So just invert the need_resched() check, putting it at the bottom of the loop.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
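To see why the inversion matters, here is a minimal, standalone C sketch (not kernel code: need_resched() is stubbed to return true to model a reschedule already pending on entry, and the hypothetical poll_once() stands in for one mq_ops->poll() pass). A top-tested while skips the body entirely in that case; the bottom-tested do/while guarantees one pass:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stub: pretend a reschedule is already pending on entry. */
	static bool need_resched(void) { return true; }

	/* Hypothetical stand-in for one mq_ops->poll() invocation. */
	static void poll_once(void) { puts("polled once"); }

	int main(void)
	{
		/* Old form: top-tested. With need_resched() already true,
		 * the body never runs and waiting entries are never reaped. */
		while (!need_resched())
			poll_once();

		/* New form: bottom-tested. One poll pass happens before the
		 * reschedule check, so pending entries are seen at least once. */
		do {
			poll_once();
		} while (!need_resched());

		return 0;
	}

Compiled and run, this prints "polled once" exactly once: only the do/while variant ever reaches the poll.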
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c2751f0a3ccc..ba3c7b6476b7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3375,7 +3375,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	hctx->poll_considered++;
 
 	state = current->state;
-	while (!need_resched()) {
+	do {
 		int ret;
 
 		hctx->poll_invoked++;
@@ -3395,7 +3395,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 		if (ret < 0 || !spin)
 			break;
 		cpu_relax();
-	}
+	} while (!need_resched());
 
 	__set_current_state(TASK_RUNNING);
 	return 0;
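Taken together, the two hunks turn the polling loop into a bottom-tested one. A sketch of the resulting shape, assembled only from the context lines above (the loop body between the two hunks is elided here, exactly as in the diff):

	state = current->state;
	do {
		int ret;

		hctx->poll_invoked++;

		/* ... ->poll() call and completion handling elided,
		 * as between the two hunks above ... */

		if (ret < 0 || !spin)
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;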