From 5091cdec56faeaefa79de4b6cb3c3c55e50d1ac3 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 18 Sep 2020 20:22:30 -0400 Subject: dm: change max_io_len() to use blk_max_size_offset() Using blk_max_size_offset() enables DM core's splitting to impose ti->max_io_len (via q->limits.chunk_sectors) and also fallback to respecting q->limits.max_sectors if chunk_sectors isn't set. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 977a962fa0bb..82886b4edab8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1051,22 +1051,18 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti static sector_t max_io_len(sector_t sector, struct dm_target *ti) { sector_t len = max_io_len_target_boundary(sector, ti); - sector_t offset, max_len; + sector_t max_len; /* * Does the target need to split even further? + * - q->limits.chunk_sectors reflects ti->max_io_len so + * blk_max_size_offset() provides required splitting. + * - blk_max_size_offset() also respects q->limits.max_sectors */ - if (ti->max_io_len) { - offset = dm_target_offset(ti, sector); - if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) - max_len = sector_div(offset, ti->max_io_len); - else - max_len = offset & (ti->max_io_len - 1); - max_len = ti->max_io_len - max_len; - - if (len > max_len) - len = max_len; - } + max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue, + dm_target_offset(ti, sector)); + if (len > max_len) + len = max_len; return len; } -- cgit From 094ee64d7de8ab72b495ff9c03d86a60272da56d Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 14 Sep 2020 13:50:49 -0400 Subject: dm: push md->immutable_target optimization down to __process_bio() Also, update associated stale comment in __bind(). Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 82886b4edab8..e1cb3b9fd207 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1680,7 +1680,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * fact that targets that use it do _not_ have a need to split bios. */ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, - struct bio *bio, struct dm_target *ti) + struct bio *bio) { struct clone_info ci; blk_qc_t ret = BLK_QC_T_NONE; @@ -1705,6 +1705,12 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; + struct dm_target *ti = md->immutable_target; + + if (WARN_ON_ONCE(!ti)) { + error = -EIO; + goto out; + } ci.bio = bio; ci.sector_count = bio_sectors(bio); @@ -1724,21 +1730,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { blk_qc_t ret = BLK_QC_T_NONE; - struct dm_target *ti = md->immutable_target; if (unlikely(!map)) { bio_io_error(bio); return ret; } - if (!ti) { - ti = dm_table_find_target(map, bio->bi_iter.bi_sector); - if (unlikely(!ti)) { - bio_io_error(bio); - return ret; - } - } - /* * If in ->submit_bio we need to use blk_queue_split(), otherwise * queue_limits for abnormal requests (e.g. 
discard, writesame, etc) @@ -1753,7 +1750,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - return __process_bio(md, map, bio, ti); + return __process_bio(md, map, bio); return __split_and_process_bio(md, map, bio); } @@ -2120,8 +2117,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, /* * Leverage the fact that request-based DM targets and * NVMe bio based targets are immutable singletons - * - used to optimize both dm_request_fn and dm_mq_queue_rq; - * and __process_bio. + * - used to optimize both __process_bio and dm_mq_queue_rq */ md->immutable_target = dm_table_get_immutable_target(t); } -- cgit From 3720281db9ad4905c3afc1bf389314d64e145093 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 19 Sep 2020 13:12:48 -0400 Subject: dm: optimize max_io_len() by inlining max_io_len_target_boundary() Saves redundant dm_target_offset() math. Also, reverse argument order for max_io_len() to be consistent with other similar functions. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e1cb3b9fd207..0d3639414be7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1041,16 +1041,16 @@ static void clone_endio(struct bio *bio) * Return maximum size of I/O possible at the supplied sector up to the current * target boundary. */ -static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) +static inline sector_t max_io_len_target_boundary(struct dm_target *ti, + sector_t target_offset) { - sector_t target_offset = dm_target_offset(ti, sector); - return ti->len - target_offset; } -static sector_t max_io_len(sector_t sector, struct dm_target *ti) +static sector_t max_io_len(struct dm_target *ti, sector_t sector) { - sector_t len = max_io_len_target_boundary(sector, ti); + sector_t target_offset = dm_target_offset(ti, sector); + sector_t len = max_io_len_target_boundary(ti, target_offset); sector_t max_len; /* @@ -1060,7 +1060,7 @@ static sector_t max_io_len(sector_t sector, struct dm_target *ti) * - blk_max_size_offset() also respects q->limits.max_sectors */ max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue, - dm_target_offset(ti, sector)); + target_offset); if (len > max_len) len = max_len; @@ -1115,7 +1115,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, goto out; if (!ti->type->direct_access) goto out; - len = max_io_len(sector, ti) / PAGE_SECTORS; + len = max_io_len(ti, sector) / PAGE_SECTORS; if (len < 1) goto out; nr_pages = min(len, nr_pages); @@ -1497,7 +1497,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * if (!num_bios) return -EOPNOTSUPP; - len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); + len = min_t(sector_t, ci->sector_count, + max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); __send_duplicate_bios(ci, ti, num_bios, &len); @@ -1578,7 +1579,7 @@ static int __split_and_process_non_flush(struct clone_info *ci) if (__process_abnormal_io(ci, ti, &r)) return r; - len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); + len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); if (r < 0) -- cgit From 828678b87eff06a4fff1e13e8b107287d522ed30 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 14 Sep 2020 
13:59:53 -0400 Subject: dm: push use of on-stack flush_bio down to __send_empty_flush() Eliminates duplicate code, no functional change. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 37 +++++++++++++------------------------ 1 file changed, 13 insertions(+), 24 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0d3639414be7..ea901cb09ea7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1426,6 +1426,17 @@ static int __send_empty_flush(struct clone_info *ci) { unsigned target_nr = 0; struct dm_target *ti; + struct bio flush_bio; + + /* + * Use an on-stack bio for this, it's safe since we don't + * need to reference it after submit. It's just used as + * the basis for the clone(s). + */ + bio_init(&flush_bio, NULL, 0); + flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + ci->bio = &flush_bio; + ci->sector_count = 0; /* * Empty flush uses a statically initialized bio, as the base for @@ -1439,6 +1450,8 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); + + bio_uninit(ci->bio); return 0; } @@ -1615,19 +1628,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; - - /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. It's just used as - * the basis for the clone(s). - */ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; error = __send_empty_flush(&ci); - bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (op_is_zone_mgmt(bio_op(bio))) { ci.bio = bio; @@ -1690,19 +1691,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; - - /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. It's just used as - * the basis for the clone(s). - */ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; error = __send_empty_flush(&ci); - bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; -- cgit From 9679b5a7ec400f18f1812339b59c94750db48a76 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 15 Sep 2020 21:56:29 -0400 Subject: dm: simplify __process_abnormal_io() Only call bio_op() once in switch statement. Also remove the excessive factoring out to one line functions. 
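As a rough, self-contained illustration of the resulting shape (stand-in
types and values, not the kernel code): a single bio_op()-style lookup
feeds one switch that selects the per-target bio count, and unknown ops
fall through to the normal-I/O path.

#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_DISCARD, REQ_OP_SECURE_ERASE,
              REQ_OP_WRITE_SAME, REQ_OP_WRITE_ZEROES };

struct target {        /* stand-in for struct dm_target's bio counts */
	unsigned num_discard_bios;
	unsigned num_secure_erase_bios;
	unsigned num_write_same_bios;
	unsigned num_write_zeroes_bios;
};

/* Returns 1 and sets *num_bios when op is "abnormal"; 0 otherwise. */
static int abnormal_num_bios(enum req_op op, const struct target *ti,
			     unsigned *num_bios)
{
	switch (op) {
	case REQ_OP_DISCARD:      *num_bios = ti->num_discard_bios; break;
	case REQ_OP_SECURE_ERASE: *num_bios = ti->num_secure_erase_bios; break;
	case REQ_OP_WRITE_SAME:   *num_bios = ti->num_write_same_bios; break;
	case REQ_OP_WRITE_ZEROES: *num_bios = ti->num_write_zeroes_bios; break;
	default:                  return 0;   /* normal I/O path */
	}
	return 1;
}

int main(void)
{
	struct target ti = { .num_discard_bios = 1 };
	unsigned n;

	if (abnormal_num_bios(REQ_OP_DISCARD, &ti, &n))
		printf("abnormal, %u bio(s)\n", n);
	return 0;
}
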
Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 68 +++++++++++++++------------------------------------------ 1 file changed, 17 insertions(+), 51 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ea901cb09ea7..f7184b3dca66 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1474,28 +1474,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, return 0; } -typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); - -static unsigned get_num_discard_bios(struct dm_target *ti) -{ - return ti->num_discard_bios; -} - -static unsigned get_num_secure_erase_bios(struct dm_target *ti) -{ - return ti->num_secure_erase_bios; -} - -static unsigned get_num_write_same_bios(struct dm_target *ti) -{ - return ti->num_write_same_bios; -} - -static unsigned get_num_write_zeroes_bios(struct dm_target *ti) -{ - return ti->num_write_zeroes_bios; -} - static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, unsigned num_bios) { @@ -1521,26 +1499,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * return 0; } -static int __send_discard(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); -} - -static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); -} - -static int __send_write_same(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); -} - -static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); -} - static bool is_abnormal_io(struct bio *bio) { bool r = false; @@ -1561,18 +1519,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, int *result) { struct bio *bio = ci->bio; + unsigned num_bios = 0; - if (bio_op(bio) == REQ_OP_DISCARD) - *result = __send_discard(ci, ti); - else if (bio_op(bio) == REQ_OP_SECURE_ERASE) - *result = __send_secure_erase(ci, ti); - else if (bio_op(bio) == REQ_OP_WRITE_SAME) - *result = __send_write_same(ci, ti); - else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) - *result = __send_write_zeroes(ci, ti); - else + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + num_bios = ti->num_discard_bios; + break; + case REQ_OP_SECURE_ERASE: + num_bios = ti->num_secure_erase_bios; + break; + case REQ_OP_WRITE_SAME: + num_bios = ti->num_write_same_bios; + break; + case REQ_OP_WRITE_ZEROES: + num_bios = ti->num_write_zeroes_bios; + break; + default: return false; + } + *result = __send_changing_extent_only(ci, ti, num_bios); return true; } -- cgit From 7465d7ac50edb3158c5eb957c5ecd3a5310e1c68 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 17 Sep 2020 12:59:36 -0400 Subject: dm: eliminate need for start_io_acct() forward declaration Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 78 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 38 insertions(+), 40 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f7184b3dca66..b5b18bafd865 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -591,7 +591,44 @@ out: return r; } -static void start_io_acct(struct dm_io *io); +u64 dm_start_time_ns_from_clone(struct bio *bio) +{ + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); + struct dm_io *io = tio->io; + + return 
jiffies_to_nsecs(io->start_time); +} +EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); + +static void start_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + struct bio *bio = io->orig_bio; + + io->start_time = bio_start_io_acct(bio); + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + false, 0, &io->stats_aux); +} + +static void end_io_acct(struct dm_io *io) +{ + struct mapped_device *md = io->md; + struct bio *bio = io->orig_bio; + unsigned long duration = jiffies - io->start_time; + + bio_end_io_acct(bio, io->start_time); + + if (unlikely(dm_stats_used(&md->stats))) + dm_stats_account_io(&md->stats, bio_data_dir(bio), + bio->bi_iter.bi_sector, bio_sectors(bio), + true, duration, &io->stats_aux); + + /* nudge anyone waiting on suspend queue */ + if (unlikely(wq_has_sleeper(&md->wait))) + wake_up(&md->wait); +} static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) { @@ -657,45 +694,6 @@ static void free_tio(struct dm_target_io *tio) bio_put(&tio->clone); } -u64 dm_start_time_ns_from_clone(struct bio *bio) -{ - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - struct dm_io *io = tio->io; - - return jiffies_to_nsecs(io->start_time); -} -EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); - -static void start_io_acct(struct dm_io *io) -{ - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - - io->start_time = bio_start_io_acct(bio); - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - false, 0, &io->stats_aux); -} - -static void end_io_acct(struct dm_io *io) -{ - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - unsigned long duration = jiffies - io->start_time; - - bio_end_io_acct(bio, io->start_time); - - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, &io->stats_aux); - - /* nudge anyone waiting on suspend queue */ - if (unlikely(wq_has_sleeper(&md->wait))) - wake_up(&md->wait); -} - /* * Add the bio to the list of deferred io. */ -- cgit From 33bd6f0693857492ab19869d79801437ac1e42ba Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sat, 19 Sep 2020 13:09:11 -0400 Subject: dm table: make 'struct dm_table' definition accessible to all of DM core Move 'struct dm_table' definition from dm-table.c to dm-core.h and update DM core to access its members directly. Helps optimize max_io_len() and other methods slightly. 
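Since this again touches max_io_len(), the boundary math that
blk_max_size_offset() performs (relied on since the first patch in this
series) can be sketched in plain userspace C as follows; max_size_at()
is a simplified stand-in, not the kernel implementation.

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Rough model: the largest I/O starting at 'offset' that neither
 * crosses the next chunk_sectors boundary nor exceeds max_sectors.
 * A power-of-two chunk size lets the modulo collapse to a mask,
 * which is what the old open-coded max_io_len() special-cased.
 */
static sector_t max_size_at(sector_t offset, unsigned chunk_sectors,
			    unsigned max_sectors)
{
	sector_t left;

	if (!chunk_sectors)
		return max_sectors;            /* no chunking configured */

	if ((chunk_sectors & (chunk_sectors - 1)) == 0)
		left = chunk_sectors - (offset & (chunk_sectors - 1));
	else
		left = chunk_sectors - (offset % chunk_sectors);

	return left < max_sectors ? left : max_sectors;
}

int main(void)
{
	/* offset 300 with 256-sector chunks: 212 sectors remain before
	 * the boundary at 512, capped further by max_sectors. */
	printf("%llu\n", max_size_at(300, 256, 1024));   /* -> 212 */
	printf("%llu\n", max_size_at(300, 256, 128));    /* -> 128 */
	return 0;
}
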
Signed-off-by: Mike Snitzer
---
 drivers/md/dm.c | 23 ++++-------------------
 1 file changed, 4 insertions(+), 19 deletions(-)

(limited to 'drivers/md/dm.c')

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b5b18bafd865..a1adcf0ab821 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
 	dm_deferred_remove();
 }
 
-sector_t dm_get_size(struct mapped_device *md)
-{
-	return get_capacity(md->disk);
-}
-
-struct request_queue *dm_get_md_queue(struct mapped_device *md)
-{
-	return md->queue;
-}
-
-struct dm_stats *dm_get_stats(struct mapped_device *md)
-{
-	return &md->stats;
-}
-
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
@@ -1057,7 +1042,7 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	 *   blk_max_size_offset() provides required splitting.
 	 * - blk_max_size_offset() also respects q->limits.max_sectors
 	 */
-	max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue,
+	max_len = blk_max_size_offset(ti->table->md->queue,
 				      target_offset);
 	if (len > max_len)
 		len = max_len;
@@ -2931,19 +2916,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)
 
 int dm_suspended(struct dm_target *ti)
 {
-	return dm_suspended_md(dm_table_get_md(ti->table));
+	return dm_suspended_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
 int dm_post_suspending(struct dm_target *ti)
 {
-	return dm_post_suspending_md(dm_table_get_md(ti->table));
+	return dm_post_suspending_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_post_suspending);
 
 int dm_noflush_suspending(struct dm_target *ti)
 {
-	return __noflush_suspending(dm_table_get_md(ti->table));
+	return __noflush_suspending(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-- cgit

From 0c2915b8c6db108b1dfb240391cc5a175f97f15b Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Mon, 28 Sep 2020 13:41:36 -0400
Subject: dm: fix missing imposition of queue_limits from dm_wq_work() thread

If a DM device was suspended when bios were issued to it, those bios
would be deferred using queue_io(). Once the DM device was resumed,
dm_process_bio() could be called by dm_wq_work() for an original bio
that still needed splitting. Because dm_process_bio() checked
current->bio_list (i.e. whether the call chain was within ->submit_bio)
as a prerequisite for calling blk_queue_split() for "abnormal IO", it
would never impose the corresponding queue_limits
(e.g. discard_granularity, discard_max_bytes) on such deferred bios.

Fix this by always having dm_wq_work() resubmit deferred bios using
submit_bio_noacct(). A side-effect is that blk_queue_split() is always
called for "abnormal IO" from ->submit_bio, be it from an application
thread or the dm_wq_work() workqueue, so proper bio splitting and
depth-first bio submission are performed.

For the sake of clarity, remove the current->bio_list check before the
call to blk_queue_split(). Also, remove dm_wq_work()'s use of
dm_{get,put}_live_table() -- no longer needed since IO will be reissued
in terms of ->submit_bio. And rename the bio variable from 'c' to 'bio'.
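The deferral pattern at issue can be modeled in userspace roughly as
below; struct bio, queue_io() and submit_bio_noacct() are stand-ins for
the kernel APIs, and the LIFO list is a shortcut (the kernel's bio_list
is FIFO). The point: parked bios are popped under the lock and handed
back to the generic submission path, so all ->submit_bio-time
processing, including the splitting, runs again.

#include <pthread.h>
#include <stdio.h>

struct bio { struct bio *next; int id; };       /* stand-in */

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bio *deferred;                    /* parked while suspended */

static void submit_bio_noacct(struct bio *b)    /* placeholder */
{
	printf("resubmitted bio %d via generic path\n", b->id);
}

static void queue_io(struct bio *b)             /* called while suspended */
{
	pthread_mutex_lock(&deferred_lock);
	b->next = deferred;
	deferred = b;
	pthread_mutex_unlock(&deferred_lock);
}

static void wq_work(void)                       /* runs after resume */
{
	for (;;) {
		struct bio *b;

		pthread_mutex_lock(&deferred_lock);
		b = deferred;
		if (b)
			deferred = b->next;
		pthread_mutex_unlock(&deferred_lock);
		if (!b)
			break;
		submit_bio_noacct(b);   /* re-enters ->submit_bio, so
					 * blk_queue_split() et al. apply */
	}
}

int main(void)
{
	struct bio a = { .id = 1 }, b = { .id = 2 };

	queue_io(&a);
	queue_io(&b);
	wq_work();
	return 0;
}
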
Fixes: cf9c37865557 ("dm: fix comment in dm_process_bio()") Reported-by: Jeffle Xu Reviewed-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a1adcf0ab821..80266b94b002 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1676,17 +1676,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } /* - * If in ->submit_bio we need to use blk_queue_split(), otherwise - * queue_limits for abnormal requests (e.g. discard, writesame, etc) - * won't be imposed. - * If called from dm_wq_work() for deferred bio processing, bio - * was already handled by following code with previous ->submit_bio. + * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) + * otherwise associated queue_limits won't be imposed. */ - if (current->bio_list) { - if (is_abnormal_io(bio)) - blk_queue_split(&bio); - /* regular IO is split by __split_and_process_bio */ - } + if (is_abnormal_io(bio)) + blk_queue_split(&bio); if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) return __process_bio(md, map, bio); @@ -2383,29 +2377,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) */ static void dm_wq_work(struct work_struct *work) { - struct mapped_device *md = container_of(work, struct mapped_device, - work); - struct bio *c; - int srcu_idx; - struct dm_table *map; - - map = dm_get_live_table(md, &srcu_idx); + struct mapped_device *md = container_of(work, struct mapped_device, work); + struct bio *bio; while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); - c = bio_list_pop(&md->deferred); + bio = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); - if (!c) + if (!bio) break; - if (dm_request_based(md)) - (void) submit_bio_noacct(c); - else - (void) dm_process_bio(md, map, c); + submit_bio_noacct(bio); } - - dm_put_live_table(md, srcu_idx); } static void dm_queue_flush(struct mapped_device *md) -- cgit From b2abdb1b4b9eaffc4f41aa466ce77c2d91bb23df Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 30 Sep 2020 13:45:20 -0400 Subject: dm: fold dm_process_bio() into dm_submit_bio() dm_process_bio() is only called by dm_submit_bio(), there is no benefit to keeping dm_process_bio() factored out, so fold it. While at it, cleanup dm_submit_bio()'s DMF_BLOCK_IO_FOR_SUSPEND related branching and expand scope of dm_get_live_table() rcu reference on map via common 'out' label to dm_put_live_table(). Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 52 ++++++++++++++++++++++------------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 80266b94b002..93ca051f88f0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1665,28 +1665,6 @@ out: return ret; } -static blk_qc_t dm_process_bio(struct mapped_device *md, - struct dm_table *map, struct bio *bio) -{ - blk_qc_t ret = BLK_QC_T_NONE; - - if (unlikely(!map)) { - bio_io_error(bio); - return ret; - } - - /* - * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) - * otherwise associated queue_limits won't be imposed. 
- */ - if (is_abnormal_io(bio)) - blk_queue_split(&bio); - - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - return __process_bio(md, map, bio); - return __split_and_process_bio(md, map, bio); -} - static blk_qc_t dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_disk->private_data; @@ -1707,22 +1685,36 @@ static blk_qc_t dm_submit_bio(struct bio *bio) } map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; + } - /* if we're suspended, we have to queue this io for later */ + /* If suspended, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { - dm_put_live_table(md, srcu_idx); - if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); - else if (!(bio->bi_opf & REQ_RAHEAD)) - queue_io(md, bio); - else + else if (bio->bi_opf & REQ_RAHEAD) bio_io_error(bio); - return ret; + else + queue_io(md, bio); + goto out; } - ret = dm_process_bio(md, map, bio); + /* + * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc) + * otherwise associated queue_limits won't be imposed. + */ + if (is_abnormal_io(bio)) + blk_queue_split(&bio); + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) + ret = __process_bio(md, map, bio); + else + ret = __split_and_process_bio(md, map, bio); +out: dm_put_live_table(md, srcu_idx); return ret; } -- cgit From 0cede372ce6a8adf4d4d28fe7edd2aa913804595 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 30 Sep 2020 15:12:04 -0400 Subject: dm: fix comment in __dm_suspend() Fix stale references to functions that have been renamed and fix typo. Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 93ca051f88f0..32ac19645255 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2518,13 +2518,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, /* * Here we must make sure that no processes are submitting requests * to target drivers i.e. no one may be executing - * __split_and_process_bio. This is called from dm_request and - * dm_wq_work. + * __split_and_process_bio from dm_submit_bio. * - * To get all processes out of __split_and_process_bio in dm_request, + * To get all processes out of __split_and_process_bio in dm_submit_bio, * we take the write lock. To prevent any process from reentering - * __split_and_process_bio from dm_request and quiesce the thread - * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call + * __split_and_process_bio from dm_submit_bio and quiesce the thread + * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call * flush_workqueue(md->wq). */ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); -- cgit From 9c37de297f6590937f95a28bec1b7ac68a38618f Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 7 Oct 2020 15:15:08 -0400 Subject: dm: remove special-casing of bio-based immutable singleton target on NVMe Since commit 5a6c35f9af416 ("block: remove direct_make_request") there is no benefit to DM special-casing NVMe. Remove all code used to establish DM_TYPE_NVME_BIO_BASED. 
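For context, the special case being deleted amounted to the kind of
dispatch sketched below (stand-in types, not the kernel code): an
immutable singleton target let submission skip the per-bio target
lookup and the splitting machinery, a shortcut that no longer pays for
its duplication.

#include <stdio.h>

struct dm_target { const char *name; };         /* stand-in */

struct mapped_device {
	struct dm_target *immutable_target;     /* set once at bind, or NULL */
	struct dm_target table[2];              /* trivial stand-in "table" */
};

static struct dm_target *find_target(struct mapped_device *md, long sector)
{
	return &md->table[sector & 1];          /* pretend lookup */
}

static struct dm_target *target_for_bio(struct mapped_device *md, long sector)
{
	if (md->immutable_target)               /* the removed fast path */
		return md->immutable_target;
	return find_target(md, sector);         /* the now-unified path */
}

int main(void)
{
	struct mapped_device md = { .table = { { "linear0" }, { "linear1" } } };

	printf("%s\n", target_for_bio(&md, 7)->name);
	return 0;
}
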
Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 55 +++++-------------------------------------------------- 1 file changed, 5 insertions(+), 50 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 32ac19645255..af1bab3a810e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -975,7 +975,7 @@ static void clone_endio(struct bio *bio) dm_endio_fn endio = tio->ti->type->end_io; struct bio *orig_bio = io->orig_bio; - if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { + if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_DISCARD && !bio->bi_disk->queue->limits.max_discard_sectors) disable_discard(md); @@ -1626,45 +1626,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, return ret; } -/* - * Optimized variant of __split_and_process_bio that leverages the - * fact that targets that use it do _not_ have a need to split bios. - */ -static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, - struct bio *bio) -{ - struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; - int error = 0; - - init_clone_info(&ci, md, map, bio); - - if (bio->bi_opf & REQ_PREFLUSH) { - error = __send_empty_flush(&ci); - /* dec_pending submits any data associated with flush */ - } else { - struct dm_target_io *tio; - struct dm_target *ti = md->immutable_target; - - if (WARN_ON_ONCE(!ti)) { - error = -EIO; - goto out; - } - - ci.bio = bio; - ci.sector_count = bio_sectors(bio); - if (__process_abnormal_io(&ci, ti, &error)) - goto out; - - tio = alloc_tio(&ci, ti, 0, GFP_NOIO); - ret = __clone_and_map_simple_bio(&ci, tio, NULL); - } -out: - /* drop the extra reference count */ - dec_pending(ci.io, errno_to_blk_status(error)); - return ret; -} - static blk_qc_t dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_disk->private_data; @@ -1710,10 +1671,7 @@ static blk_qc_t dm_submit_bio(struct bio *bio) if (is_abnormal_io(bio)) blk_queue_split(&bio); - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - ret = __process_bio(md, map, bio); - else - ret = __split_and_process_bio(md, map, bio); + ret = __split_and_process_bio(md, map, bio); out: dm_put_live_table(md, srcu_idx); return ret; @@ -2038,11 +1996,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, if (request_based) dm_stop_queue(q); - if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { + if (request_based) { /* - * Leverage the fact that request-based DM targets and - * NVMe bio based targets are immutable singletons - * - used to optimize both __process_bio and dm_mq_queue_rq + * Leverage the fact that request-based DM targets are + * immutable singletons - used to optimize dm_mq_queue_rq. 
*/ md->immutable_target = dm_table_get_immutable_target(t); } @@ -2164,7 +2121,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2922,7 +2878,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); -- cgit From 681cc5e8667e8579a2da8fa4090c48a2d73fc3bb Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 7 Oct 2020 16:41:01 -0400 Subject: dm: fix request-based DM to not bounce through indirect dm_submit_bio It is unnecessary to force request-based DM to call into bio-based dm_submit_bio (via indirect disk->fops->submit_bio) only to have it then call blk_mq_submit_bio(). Fix this by establishing a request-based DM block_device_operations (dm_rq_blk_dops, which doesn't have .submit_bio) and update dm_setup_md_queue() to set md->disk->fops to it for DM_TYPE_REQUEST_BASED. Remove DM_TYPE_REQUEST_BASED conditional in dm_submit_bio and unexport blk_mq_submit_bio. Fixes: c62b37d96b6eb ("block: move ->make_request_fn to struct block_device_operations") Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/md/dm.c') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index af1bab3a810e..e396ec2b45c3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1633,18 +1633,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio) int srcu_idx; struct dm_table *map; - if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) { - /* - * We are called with a live reference on q_usage_counter, but - * that one will be released as soon as we return. Grab an - * extra one as blk_mq_submit_bio expects to be able to consume - * a reference (which lives until the request is freed in case a - * request is allocated). 
- */ - percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); - return blk_mq_submit_bio(bio); - } - map = dm_get_live_table(md, &srcu_idx); if (unlikely(!map)) { DMERR_LIMIT("%s: mapping table unavailable, erroring io", @@ -1727,6 +1715,7 @@ static int next_free_minor(int *minor) } static const struct block_device_operations dm_blk_dops; +static const struct block_device_operations dm_rq_blk_dops; static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); @@ -2113,9 +2102,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) switch (type) { case DM_TYPE_REQUEST_BASED: + md->disk->fops = &dm_rq_blk_dops; r = dm_mq_init_request_queue(md, t); if (r) { - DMERR("Cannot initialize queue for request-based dm-mq mapped device"); + DMERR("Cannot initialize queue for request-based dm mapped device"); return r; } break; @@ -3095,6 +3085,15 @@ static const struct block_device_operations dm_blk_dops = { .owner = THIS_MODULE }; +static const struct block_device_operations dm_rq_blk_dops = { + .open = dm_blk_open, + .release = dm_blk_close, + .ioctl = dm_blk_ioctl, + .getgeo = dm_blk_getgeo, + .pr_ops = &dm_pr_ops, + .owner = THIS_MODULE +}; + static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .dax_supported = dm_dax_supported, -- cgit