author    Christoph Hellwig <hch@lst.de>  2021-10-12 13:12:19 +0200
committer Jens Axboe <axboe@kernel.dk>    2021-10-18 06:17:36 -0600
commit    ef99b2d37666b7a600baab9e1c4944436652b0a2 (patch)
tree      81457d0c9620f8c7311bf76d532670fa5d53074f /block/fops.c
parent    28a1ae6b9daba6ac65700eeb38479bd6fadec089 (diff)
block: replace the spin argument to blk_poll with a flags argument
Switch the boolean spin argument to blk_poll to passing a set of flags
instead. This will allow controlling polling behavior in a more
fine-grained way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de
[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
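For context, a minimal sketch of the interface change this patch makes. The
commit itself only widens the third argument from bool to unsigned int; the
specific BLK_POLL_* bits shown below are assumptions about how later patches
in the series populate the flags word, not something this commit defines.

    /* Before: callers could only choose whether blk_poll() may spin. */
    int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

    /* After: a flags word leaves room for finer-grained control. */
    int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);

    /*
     * Indicative flag bits (assumed; introduced by later patches in
     * the series, not by this commit).
     */
    #define BLK_POLL_ONESHOT    (1 << 0)    /* poll only once */
    #define BLK_POLL_NOSLEEP    (1 << 1)    /* do not sleep while polling */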
Diffstat (limited to 'block/fops.c')
-rw-r--r--  block/fops.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/fops.c b/block/fops.c
index 15324f2e5a91..db8f2fe68dd2 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
 		if (!READ_ONCE(bio.bi_private))
 			break;
 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc, true))
+		    !blk_poll(bdev_get_queue(bdev), qc, 0))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
@@ -141,12 +141,12 @@ struct blkdev_dio {
 
 static struct bio_set blkdev_dio_pool;
 
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
 	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 
 static void blkdev_bio_end_io(struct bio *bio)
static void blkdev_bio_end_io(struct bio *bio)
@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		if (!READ_ONCE(dio->waiter))
 			break;
 
-		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
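
As a hedged usage sketch: the ->iopoll() file operation changes signature in
step with blk_poll() (per the blkdev_iopoll hunk above), so a polling consumer
such as io_uring can hand a flags word straight through. The helper below is
illustrative only and not kernel code; the real io_uring call site is more
involved.

    /* Illustrative helper (assumed name): poll one in-flight kiocb. */
    static int poll_one(struct kiocb *kiocb, unsigned int poll_flags)
    {
    	struct file *file = kiocb->ki_filp;
    
    	if (!file->f_op->iopoll)
    		return -EOPNOTSUPP;
    	/*
    	 * For block devices this lands in blkdev_iopoll() above, which
    	 * forwards poll_flags unchanged to blk_poll().
    	 */
    	return file->f_op->iopoll(kiocb, poll_flags);
    }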