Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	37	+++++++++++--------------------------
1 file changed, 11 insertions(+), 26 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 651057c4146b..17667159482e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -37,7 +37,6 @@
 #include <linux/t10-pi.h>
 #include <linux/debugfs.h>
 #include <linux/bpf.h>
-#include <linux/psi.h>
 #include <linux/part_stat.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
@@ -487,18 +486,15 @@ static int __init fail_make_request_debugfs(void)
 late_initcall(fail_make_request_debugfs);
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool bio_check_ro(struct bio *bio)
+static inline void bio_check_ro(struct bio *bio)
 {
 	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
-			return false;
+			return;
 		pr_warn("Trying to write to read-only block-device %pg\n",
 			bio->bi_bdev);
 		/* Older lvm-tools actually trigger this */
-		return false;
 	}
-
-	return false;
 }
 
 static noinline int should_fail_bio(struct bio *bio)
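
After this hunk the helper reads as follows (reconstructed directly from the diff context above). Every path in the old version already returned false, so the bool return was dead code and the warning is the only remaining effect:

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
	}
}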
@@ -717,13 +713,12 @@ void submit_bio_noacct(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue does not support NOWAIT.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
+	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
 		goto not_supported;
 
 	if (should_fail_bio(bio))
 		goto end_io;
-	if (unlikely(bio_check_ro(bio)))
-		goto end_io;
+	bio_check_ro(bio);
 	if (!bio_flagged(bio, BIO_REMAPPED)) {
 		if (unlikely(bio_check_eod(bio)))
 			goto end_io;
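
The NOWAIT check now tests the block_device directly instead of going through the queue pointer. As a sketch of what the bdev_nowait() helper does (paraphrased from include/linux/blkdev.h of this era; not part of this diff), it is the same queue-flag test, just reached via the bdev:

/* Sketch, not part of this diff: roughly how bdev_nowait() is defined. */
static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}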
@@ -814,7 +809,7 @@ EXPORT_SYMBOL(submit_bio_noacct);
  *
  * The success/failure status of the request, along with notification of
  * completion, is delivered asynchronously through the ->bi_end_io() callback
- * in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has
+ * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
  * been called.
  */
 void submit_bio(struct bio *bio)
@@ -829,22 +824,6 @@ void submit_bio(struct bio *bio)
 		count_vm_events(PGPGOUT, bio_sectors(bio));
 	}
 
-	/*
-	 * If we're reading data that is part of the userspace workingset, count
-	 * submission time as memory stall. When the device is congested, or
-	 * the submitting cgroup IO-throttled, submission can be a significant
-	 * part of overall IO time.
-	 */
-	if (unlikely(bio_op(bio) == REQ_OP_READ &&
-	    bio_flagged(bio, BIO_WORKINGSET))) {
-		unsigned long pflags;
-
-		psi_memstall_enter(&pflags);
-		submit_bio_noacct(bio);
-		psi_memstall_leave(&pflags);
-		return;
-	}
-
 	submit_bio_noacct(bio);
 }
 EXPORT_SYMBOL(submit_bio);
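
The deleted block was the bio layer's only PSI hook: a read of userspace workingset pages bracketed its own submission with psi_memstall_enter()/psi_memstall_leave(), so time spent submitting under device congestion or cgroup IO throttling was counted as a memory stall. The general pattern, exactly as the removed code used it, is:

	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* work whose duration should count as a memory stall */
	psi_memstall_leave(&pflags);

With this change, any such accounting has to happen above the bio layer, in callers that know the read is part of the workingset.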
@@ -871,6 +850,12 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
 
+	/*
+	 * As the requests that require a zone lock are not plugged in the
+	 * first place, directly accessing the plug instead of using
+	 * blk_mq_plug() should not have any consequences during flushing for
+	 * zoned devices.
+	 */
 	blk_flush_plug(current->plug, false);
 
 	if (bio_queue_enter(bio))
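
For context on the new comment: blk_mq_plug() is the helper that declines to plug write bios aimed at zoned devices, which is why flushing current->plug directly here cannot flush a request that would have needed a zone lock. A sketch of that helper (assumed from block/blk-mq.h of the same era; not part of this diff):

/* Sketch, not part of this diff. */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context
	 * plug, which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}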