Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  2933
1 file changed, 1781 insertions(+), 1152 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d67c95ef8d7e..6c83ab940af7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. @@ -8,10 +9,13 @@ #include "dm-core.h" #include "dm-rq.h" #include "dm-uevent.h" +#include "dm-ima.h" +#include <linux/bio-integrity.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> @@ -25,6 +29,9 @@ #include <linux/wait.h> #include <linux/pr.h> #include <linux/refcount.h> +#include <linux/part_stat.h> +#include <linux/blk-crypto.h> +#include <linux/blk-crypto-profile.h> #define DM_MSG_PREFIX "core" @@ -35,10 +42,17 @@ #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" #define DM_COOKIE_LENGTH 24 +/* + * For REQ_POLLED fs bio, this flag is set if we link mapped underlying + * dm_io into one list, and reuse bio->bi_private as the list head. Before + * ending this fs bio, we will recover its ->bi_private. + */ +#define REQ_DM_POLL_LIST REQ_DRV + static const char *_name = DM_NAME; -static unsigned int major = 0; -static unsigned int _major = 0; +static unsigned int major; +static unsigned int _major; static DEFINE_IDR(_minor_idr); @@ -59,6 +73,10 @@ void dm_issue_global_event(void) wake_up(&dm_global_eventq); } +DEFINE_STATIC_KEY_FALSE(stats_enabled); +DEFINE_STATIC_KEY_FALSE(swap_bios_enabled); +DEFINE_STATIC_KEY_FALSE(zoned_enabled); + /* * One of these is allocated (on-stack) per original bio. */ @@ -67,61 +85,36 @@ struct clone_info { struct bio *bio; struct dm_io *io; sector_t sector; - unsigned sector_count; -}; - -/* - * One of these is allocated per clone bio. - */ -#define DM_TIO_MAGIC 7282014 -struct dm_target_io { - unsigned magic; - struct dm_io *io; - struct dm_target *ti; - unsigned target_bio_nr; - unsigned *len_ptr; - bool inside_dm_io; - struct bio clone; + unsigned int sector_count; + bool is_abnormal_io:1; + bool submit_as_polled:1; }; -/* - * One of these is allocated per original bio. - * It contains the first clone used for that original. 
- */ -#define DM_IO_MAGIC 5191977 -struct dm_io { - unsigned magic; - struct mapped_device *md; - blk_status_t status; - atomic_t io_count; - struct bio *orig_bio; - unsigned long start_time; - spinlock_t endio_lock; - struct dm_stats_aux stats_aux; - /* last member of dm_target_io is 'struct bio' */ - struct dm_target_io tio; -}; +static inline struct dm_target_io *clone_to_tio(struct bio *clone) +{ + return container_of(clone, struct dm_target_io, clone); +} void *dm_per_bio_data(struct bio *bio, size_t data_size) { - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - if (!tio->inside_dm_io) - return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; - return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; + if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) + return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; + return (char *)bio - DM_IO_BIO_OFFSET - data_size; } EXPORT_SYMBOL_GPL(dm_per_bio_data); struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) { struct dm_io *io = (struct dm_io *)((char *)data + data_size); + if (io->magic == DM_IO_MAGIC) - return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); + return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); BUG_ON(io->magic != DM_TIO_MAGIC); - return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); + return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); } EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); -unsigned dm_bio_get_target_bio_nr(const struct bio *bio) +unsigned int dm_bio_get_target_bio_nr(const struct bio *bio) { return container_of(bio, struct dm_target_io, clone)->target_bio_nr; } @@ -129,28 +122,19 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define MINOR_ALLOCED ((void *)-1) -/* - * Bits for the md->flags field. - */ -#define DMF_BLOCK_IO_FOR_SUSPEND 0 -#define DMF_SUSPENDED 1 -#define DMF_FROZEN 2 -#define DMF_FREEING 3 -#define DMF_DELETING 4 -#define DMF_NOFLUSH_SUSPENDING 5 -#define DMF_DEFERRED_REMOVE 6 -#define DMF_SUSPENDED_INTERNALLY 7 - #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; -/* - * For mempools pre-allocation at the table loading time. - */ -struct dm_md_mempools { - struct bio_set bs; - struct bio_set io_bs; -}; +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) +static int swap_bios = DEFAULT_SWAP_BIOS; +static int get_swap_bios(void) +{ + int latch = READ_ONCE(swap_bios); + + if (unlikely(latch <= 0)) + latch = DEFAULT_SWAP_BIOS; + return latch; +} struct table_device { struct list_head list; @@ -158,14 +142,11 @@ struct table_device { struct dm_dev dm_dev; }; -static struct kmem_cache *_rq_tio_cache; -static struct kmem_cache *_rq_cache; - /* * Bio-based DM's mempools' reserved IOs set by the user. 
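The dm_per_bio_data() / dm_bio_from_per_bio_data() pair above replaces the open-coded offsetof() chains with the precomputed DM_TARGET_IO_BIO_OFFSET / DM_IO_BIO_OFFSET constants, but the underlying trick is unchanged: the clone bio sits at a known offset inside dm_target_io, which is itself the last member of dm_io, so plain pointer arithmetic recovers the containing structures. A minimal userspace sketch of that layout, using hypothetical *_stub types in place of the real bio/dm_target_io/dm_io:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct bio_stub { int opf; };            /* stand-in for struct bio */

struct tio_stub {                        /* stand-in for dm_target_io */
    unsigned magic;
    struct bio_stub clone;               /* embedded clone bio */
};

struct io_stub {                         /* stand-in for dm_io */
    unsigned magic;
    struct tio_stub tio;                 /* last member embeds the bio */
};

#define TIO_BIO_OFFSET offsetof(struct tio_stub, clone)
#define IO_BIO_OFFSET  (offsetof(struct io_stub, tio) + TIO_BIO_OFFSET)

/* recover the outer io from its embedded clone bio, as alloc_io() lays it out */
static struct io_stub *bio_to_io(struct bio_stub *bio)
{
    return (struct io_stub *)((char *)bio - IO_BIO_OFFSET);
}

int main(void)
{
    struct io_stub io = { .magic = 5191977 };

    assert(bio_to_io(&io.tio.clone) == &io);
    printf("magic %u\n", bio_to_io(&io.tio.clone)->magic);
    return 0;
}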
*/ #define RESERVED_BIO_BASED_IOS 16 -static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; +static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; static int __dm_get_module_param_int(int *module_param, int min, int max) { @@ -188,11 +169,10 @@ static int __dm_get_module_param_int(int *module_param, int min, int max) return param; } -unsigned __dm_get_module_param(unsigned *module_param, - unsigned def, unsigned max) +unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max) { - unsigned param = READ_ONCE(*module_param); - unsigned modified_param = 0; + unsigned int param = READ_ONCE(*module_param); + unsigned int modified_param = 0; if (!param) modified_param = def; @@ -207,14 +187,14 @@ unsigned __dm_get_module_param(unsigned *module_param, return param; } -unsigned dm_get_reserved_bio_based_ios(void) +unsigned int dm_get_reserved_bio_based_ios(void) { return __dm_get_module_param(&reserved_bio_based_ios, RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS); } EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); -static unsigned dm_get_numa_node(void) +static unsigned int dm_get_numa_node(void) { return __dm_get_module_param_int(&dm_numa_node, DM_NUMA_NODE, num_online_nodes() - 1); @@ -222,22 +202,13 @@ static unsigned dm_get_numa_node(void) static int __init local_init(void) { - int r = -ENOMEM; - - _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); - if (!_rq_tio_cache) - return r; - - _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), - __alignof__(struct request), 0, NULL); - if (!_rq_cache) - goto out_free_rq_tio_cache; + int r; r = dm_uevent_init(); if (r) - goto out_free_rq_cache; + return r; - deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); + deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); if (!deferred_remove_workqueue) { r = -ENOMEM; goto out_uevent_exit; @@ -257,21 +228,14 @@ out_free_workqueue: destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); -out_free_rq_cache: - kmem_cache_destroy(_rq_cache); -out_free_rq_tio_cache: - kmem_cache_destroy(_rq_tio_cache); return r; } static void local_exit(void) { - flush_scheduled_work(); destroy_workqueue(deferred_remove_workqueue); - kmem_cache_destroy(_rq_cache); - kmem_cache_destroy(_rq_tio_cache); unregister_blkdev(_major, _name); dm_uevent_exit(); @@ -305,9 +269,13 @@ static void (*_exits[])(void) = { static int __init dm_init(void) { const int count = ARRAY_SIZE(_inits); - int r, i; +#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) + DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." + " Duplicate IMA measurements will not be recorded in the IMA log."); +#endif + for (i = 0; i < count; i++) { r = _inits[i](); if (r) @@ -315,8 +283,7 @@ static int __init dm_init(void) } return 0; - - bad: +bad: while (i--) _exits[i](); @@ -344,13 +311,13 @@ int dm_deleting_md(struct mapped_device *md) return test_bit(DMF_DELETING, &md->flags); } -static int dm_blk_open(struct block_device *bdev, fmode_t mode) +static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); - md = bdev->bd_disk->private_data; + md = disk->private_data; if (!md) goto out; @@ -368,7 +335,7 @@ out: return md ? 
0 : -ENXIO; } -static void dm_blk_close(struct gendisk *disk, fmode_t mode) +static void dm_blk_close(struct gendisk *disk) { struct mapped_device *md; @@ -436,84 +403,18 @@ static void do_deferred_remove(struct work_struct *w) dm_deferred_remove(); } -sector_t dm_get_size(struct mapped_device *md) -{ - return get_capacity(md->disk); -} - -struct request_queue *dm_get_md_queue(struct mapped_device *md) -{ - return md->queue; -} - -struct dm_stats *dm_get_stats(struct mapped_device *md) -{ - return &md->stats; -} - -static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) -{ - struct mapped_device *md = bdev->bd_disk->private_data; - - return dm_get_geometry(md, geo); -} - -static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, - struct blk_zone *zones, unsigned int *nr_zones, - gfp_t gfp_mask) +static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo) { -#ifdef CONFIG_BLK_DEV_ZONED struct mapped_device *md = disk->private_data; - struct dm_target *tgt; - struct dm_table *map; - int srcu_idx, ret; - - if (dm_suspended_md(md)) - return -EAGAIN; - - map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; - - tgt = dm_table_find_target(map, sector); - if (!dm_target_is_valid(tgt)) { - ret = -EIO; - goto out; - } - /* - * If we are executing this, we already know that the block device - * is a zoned device and so each target should have support for that - * type of drive. A missing report_zones method means that the target - * driver has a problem. - */ - if (WARN_ON(!tgt->type->report_zones)) { - ret = -EIO; - goto out; - } - - /* - * blkdev_report_zones() will loop and call this again to cover all the - * zones of the target, eventually moving on to the next target. - * So there is no need to loop here trying to fill the entire array - * of zones. 
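__dm_get_module_param() in the hunks above implements the usual sanitize-on-read policy for writable module parameters: zero means "use the built-in default" and anything above the ceiling is clamped. A stand-alone sketch of that policy (the kernel version also uses READ_ONCE() and writes the sanitized value back so concurrent readers agree):

#include <stdio.h>

static unsigned int get_module_param(unsigned int *param_p,
                                     unsigned int def, unsigned int max)
{
    unsigned int param = *param_p;   /* READ_ONCE() in the kernel */

    if (!param)
        param = def;                 /* 0 selects the default */
    else if (param > max)
        param = max;                 /* clamp runaway sysfs writes */
    return param;
}

int main(void)
{
    unsigned int reserved_ios = 0;

    printf("%u\n", get_module_param(&reserved_ios, 16, 1024));  /* -> 16 */
    reserved_ios = 5000;
    printf("%u\n", get_module_param(&reserved_ios, 16, 1024));  /* -> 1024 */
    return 0;
}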
- */ - ret = tgt->type->report_zones(tgt, sector, zones, - nr_zones, gfp_mask); - -out: - dm_put_live_table(md, srcu_idx); - return ret; -#else - return -ENOTSUPP; -#endif + return dm_get_geometry(md, geo); } static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, - struct block_device **bdev) - __acquires(md->io_barrier) + struct block_device **bdev, unsigned int cmd, + unsigned long arg, bool *forward) { - struct dm_target *tgt; + struct dm_target *ti; struct dm_table *map; int r; @@ -524,20 +425,20 @@ retry: return r; /* We only support devices that have a single target */ - if (dm_table_get_num_targets(map) != 1) + if (map->num_targets != 1) return r; - tgt = dm_table_get_target(map, 0); - if (!tgt->type->prepare_ioctl) + ti = dm_table_get_target(map, 0); + if (!ti->type->prepare_ioctl) return r; if (dm_suspended_md(md)) return -EAGAIN; - r = tgt->type->prepare_ioctl(tgt, bdev); - if (r == -ENOTCONN && !fatal_signal_pending(current)) { + r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward); + if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) { dm_put_live_table(md, *srcu_idx); - msleep(10); + fsleep(10000); goto retry; } @@ -545,19 +446,19 @@ retry: } static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) - __releases(md->io_barrier) { dm_put_live_table(md, srcu_idx); } -static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, +static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; int r, srcu_idx; + bool forward = true; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) + r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward); + if (!forward || r < 0) goto out; if (r > 0) { @@ -566,7 +467,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, * subset of the parent bdev; require extra privileges. */ if (!capable(CAP_SYS_RAWIO)) { - DMWARN_LIMIT( + DMDEBUG_LIMIT( "%s: sending ioctl %x to DM device without required privilege.", current->comm, cmd); r = -ENOIOCTLCMD; @@ -574,133 +475,192 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, } } - r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + if (!bdev->bd_disk->fops->ioctl) + r = -ENOTTY; + else + r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); out: dm_unprepare_ioctl(md, srcu_idx); return r; } -static void start_io_acct(struct dm_io *io); +u64 dm_start_time_ns_from_clone(struct bio *bio) +{ + return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); +} +EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone); + +static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio) +{ + /* + * If REQ_PREFLUSH set, don't account payload, it will be + * submitted (and accounted) after this flush completes. 
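The accounting rework in the hunks that follow (dm_io_acct() and dm_start_io_acct()) must guarantee that start-accounting runs exactly once per dm_io even when duplicate clones race to trigger it: an unlocked fast-path check for the common case, plus a locked re-check for the duplicate-bio case. A minimal pthread sketch of that shape, with a hypothetical io_stub type:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct io_stub {
    pthread_mutex_t lock;
    bool accounted;
};

static void start_acct(struct io_stub *io, bool is_duplicate)
{
    if (io->accounted)
        return;                      /* common fast path, no lock */

    if (is_duplicate) {
        /* duplicates can race: re-check under the lock */
        pthread_mutex_lock(&io->lock);
        if (io->accounted) {
            pthread_mutex_unlock(&io->lock);
            return;
        }
        io->accounted = true;
        pthread_mutex_unlock(&io->lock);
    } else {
        io->accounted = true;        /* only one possible caller here */
    }
    puts("start accounting exactly once");
}

int main(void)
{
    struct io_stub io = { .lock = PTHREAD_MUTEX_INITIALIZER };

    start_acct(&io, true);
    start_acct(&io, true);           /* second call is a no-op */
    return 0;
}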
+ */ + if (io->requeue_flush_with_data) + return 0; + if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) + return io->sectors; + return bio_sectors(bio); +} -static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) +static void dm_io_acct(struct dm_io *io, bool end) +{ + struct bio *bio = io->orig_bio; + + if (dm_io_flagged(io, DM_IO_BLK_STAT)) { + if (!end) + bdev_start_io_acct(bio->bi_bdev, bio_op(bio), + io->start_time); + else + bdev_end_io_acct(bio->bi_bdev, bio_op(bio), + dm_io_sectors(io, bio), + io->start_time); + } + + if (static_branch_unlikely(&stats_enabled) && + unlikely(dm_stats_used(&io->md->stats))) { + sector_t sector; + + if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT))) + sector = bio_end_sector(bio) - io->sector_offset; + else + sector = bio->bi_iter.bi_sector; + + dm_stats_account_io(&io->md->stats, bio_data_dir(bio), + sector, dm_io_sectors(io, bio), + end, io->start_time, &io->stats_aux); + } +} + +static void __dm_start_io_acct(struct dm_io *io) +{ + dm_io_acct(io, false); +} + +static void dm_start_io_acct(struct dm_io *io, struct bio *clone) +{ + /* + * Ensure IO accounting is only ever started once. + */ + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) + return; + + /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */ + if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { + dm_io_set_flag(io, DM_IO_ACCOUNTED); + } else { + unsigned long flags; + /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ + spin_lock_irqsave(&io->lock, flags); + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { + spin_unlock_irqrestore(&io->lock, flags); + return; + } + dm_io_set_flag(io, DM_IO_ACCOUNTED); + spin_unlock_irqrestore(&io->lock, flags); + } + + __dm_start_io_acct(io); +} + +static void dm_end_io_acct(struct dm_io *io) +{ + dm_io_acct(io, true); +} + +static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) { struct dm_io *io; struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); - if (!clone) + clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); + if (unlikely(!clone)) return NULL; - - tio = container_of(clone, struct dm_target_io, clone); - tio->inside_dm_io = true; + tio = clone_to_tio(clone); + tio->flags = 0; + dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); tio->io = NULL; io = container_of(tio, struct dm_io, tio); io->magic = DM_IO_MAGIC; - io->status = 0; - atomic_set(&io->io_count, 1); + io->status = BLK_STS_OK; + io->requeue_flush_with_data = false; + + /* one ref is for submission, the other is for completion */ + atomic_set(&io->io_count, 2); + this_cpu_inc(*md->pending_io); io->orig_bio = bio; io->md = md; - spin_lock_init(&io->endio_lock); + spin_lock_init(&io->lock); + io->start_time = jiffies; + io->flags = 0; + if (blk_queue_io_stat(md->queue)) + dm_io_set_flag(io, DM_IO_BLK_STAT); - start_io_acct(io); + if (static_branch_unlikely(&stats_enabled) && + unlikely(dm_stats_used(&md->stats))) + dm_stats_record_start(&md->stats, &io->stats_aux); return io; } -static void free_io(struct mapped_device *md, struct dm_io *io) +static void free_io(struct dm_io *io) { bio_put(&io->tio.clone); } -static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti, - unsigned target_bio_nr, gfp_t gfp_mask) +static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, + unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask) { + struct mapped_device *md = ci->io->md; struct dm_target_io *tio; + struct bio *clone; if 
(!ci->io->tio.io) { /* the dm_target_io embedded in ci->io is available */ tio = &ci->io->tio; + /* alloc_io() already initialized embedded clone */ + clone = &tio->clone; } else { - struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); + clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, + &md->mempools->bs); if (!clone) return NULL; - tio = container_of(clone, struct dm_target_io, clone); - tio->inside_dm_io = false; + /* REQ_DM_POLL_LIST shouldn't be inherited */ + clone->bi_opf &= ~REQ_DM_POLL_LIST; + + tio = clone_to_tio(clone); + tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */ } tio->magic = DM_TIO_MAGIC; tio->io = ci->io; tio->ti = ti; tio->target_bio_nr = target_bio_nr; + tio->len_ptr = len; + tio->old_sector = 0; - return tio; -} - -static void free_tio(struct dm_target_io *tio) -{ - if (tio->inside_dm_io) - return; - bio_put(&tio->clone); -} - -static bool md_in_flight_bios(struct mapped_device *md) -{ - int cpu; - struct hd_struct *part = &dm_disk(md)->part0; - long sum = 0; + /* Set default bdev, but target must bio_set_dev() before issuing IO */ + clone->bi_bdev = md->disk->part0; + if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev)) + bio_set_dev(clone, md->disk->part0); - for_each_possible_cpu(cpu) { - sum += part_stat_local_read_cpu(part, in_flight[0], cpu); - sum += part_stat_local_read_cpu(part, in_flight[1], cpu); + if (len) { + clone->bi_iter.bi_size = to_bytes(*len); + if (bio_integrity(clone)) + bio_integrity_trim(clone); } - return sum != 0; -} - -static bool md_in_flight(struct mapped_device *md) -{ - if (queue_is_mq(md->queue)) - return blk_mq_queue_inflight(md->queue); - else - return md_in_flight_bios(md); -} - -static void start_io_acct(struct dm_io *io) -{ - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - - io->start_time = jiffies; - - generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio), - &dm_disk(md)->part0); - - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - false, 0, &io->stats_aux); + return clone; } -static void end_io_acct(struct dm_io *io) +static void free_tio(struct bio *clone) { - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - unsigned long duration = jiffies - io->start_time; - - generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, - io->start_time); - - if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio_data_dir(bio), - bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, &io->stats_aux); - - /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) - wake_up(&md->wait); + if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) + return; + bio_put(clone); } /* @@ -721,14 +681,16 @@ static void queue_io(struct mapped_device *md, struct bio *bio) * function to access the md->map field, and make sure they call * dm_put_live_table() when finished. 
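alloc_io() above now starts every dm_io with io_count == 2: one reference belongs to the submission path and one to the completion path, so whichever side finishes last is the one that completes the original bio. A C11-atomics sketch of that handshake, with a hypothetical io_stub standing in for dm_io:

#include <stdatomic.h>
#include <stdio.h>

struct io_stub {
    atomic_int io_count;
};

static void io_complete(struct io_stub *io)
{
    puts("completing original bio");
}

static void io_dec_pending(struct io_stub *io)
{
    /* atomic_dec_and_test() equivalent: old value 1 means we were last */
    if (atomic_fetch_sub(&io->io_count, 1) == 1)
        io_complete(io);
}

int main(void)
{
    struct io_stub io = { .io_count = 2 };

    io_dec_pending(&io);    /* completion side finishes first ... */
    io_dec_pending(&io);    /* ... submission side drops the last ref */
    return 0;
}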
*/ -struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) +struct dm_table *dm_get_live_table(struct mapped_device *md, + int *srcu_idx) __acquires(md->io_barrier) { *srcu_idx = srcu_read_lock(&md->io_barrier); return srcu_dereference(md->map, &md->io_barrier); } -void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) +void dm_put_live_table(struct mapped_device *md, + int srcu_idx) __releases(md->io_barrier) { srcu_read_unlock(&md->io_barrier, srcu_idx); } @@ -759,28 +721,53 @@ static char *_dm_claim_ptr = "I belong to device-mapper"; /* * Open a table device so we can use it as a map destination. */ -static int open_table_device(struct table_device *td, dev_t dev, - struct mapped_device *md) +static struct table_device *open_table_device(struct mapped_device *md, + dev_t dev, blk_mode_t mode) { + struct table_device *td; + struct file *bdev_file; struct block_device *bdev; - + u64 part_off; int r; - BUG_ON(td->dm_dev.bdev); + td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); + if (!td) + return ERR_PTR(-ENOMEM); + refcount_set(&td->count, 1); - bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); - if (IS_ERR(bdev)) - return PTR_ERR(bdev); + bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL); + if (IS_ERR(bdev_file)) { + r = PTR_ERR(bdev_file); + goto out_free_td; + } - r = bd_link_disk_holder(bdev, dm_disk(md)); - if (r) { - blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); - return r; + bdev = file_bdev(bdev_file); + + /* + * We can be called before the dm disk is added. In that case we can't + * register the holder relation here. It will be done once add_disk was + * called. + */ + if (md->disk->slave_dir) { + r = bd_link_disk_holder(bdev, md->disk); + if (r) + goto out_blkdev_put; } + td->dm_dev.mode = mode; td->dm_dev.bdev = bdev; - td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); - return 0; + td->dm_dev.bdev_file = bdev_file; + td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, + NULL, NULL); + format_dev_t(td->dm_dev.name, dev); + list_add(&td->list, &md->table_devices); + return td; + +out_blkdev_put: + __fput_sync(bdev_file); +out_free_td: + kfree(td); + return ERR_PTR(r); } /* @@ -788,18 +775,23 @@ static int open_table_device(struct table_device *td, dev_t dev, */ static void close_table_device(struct table_device *td, struct mapped_device *md) { - if (!td->dm_dev.bdev) - return; + if (md->disk->slave_dir) + bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); + + /* Leverage async fput() if DMF_DEFERRED_REMOVE set */ + if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags))) + fput(td->dm_dev.bdev_file); + else + __fput_sync(td->dm_dev.bdev_file); - bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); - blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); put_dax(td->dm_dev.dax_dev); - td->dm_dev.bdev = NULL; - td->dm_dev.dax_dev = NULL; + list_del(&td->list); + kfree(td); } static struct table_device *find_table_device(struct list_head *l, dev_t dev, - fmode_t mode) { + blk_mode_t mode) +{ struct table_device *td; list_for_each_entry(td, l, list) @@ -809,33 +801,19 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev, return NULL; } -int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, - struct dm_dev **result) { - int r; +int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, + struct dm_dev **result) +{ struct table_device *td; 
mutex_lock(&md->table_devices_lock); td = find_table_device(&md->table_devices, dev, mode); if (!td) { - td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); - if (!td) { - mutex_unlock(&md->table_devices_lock); - return -ENOMEM; - } - - td->dm_dev.mode = mode; - td->dm_dev.bdev = NULL; - - if ((r = open_table_device(td, dev, md))) { + td = open_table_device(md, dev, mode); + if (IS_ERR(td)) { mutex_unlock(&md->table_devices_lock); - kfree(td); - return r; + return PTR_ERR(td); } - - format_dev_t(td->dm_dev.name, dev); - - refcount_set(&td->count, 1); - list_add(&td->list, &md->table_devices); } else { refcount_inc(&td->count); } @@ -844,34 +822,16 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, *result = &td->dm_dev; return 0; } -EXPORT_SYMBOL_GPL(dm_get_table_device); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) { struct table_device *td = container_of(d, struct table_device, dm_dev); mutex_lock(&md->table_devices_lock); - if (refcount_dec_and_test(&td->count)) { + if (refcount_dec_and_test(&td->count)) close_table_device(td, md); - list_del(&td->list); - kfree(td); - } mutex_unlock(&md->table_devices_lock); } -EXPORT_SYMBOL(dm_put_table_device); - -static void free_table_devices(struct list_head *devices) -{ - struct list_head *tmp, *next; - - list_for_each_safe(tmp, next, devices) { - struct table_device *td = list_entry(tmp, struct table_device, list); - - DMWARN("dm_destroy: %s still exists with %d references", - td->dm_dev.name, refcount_read(&td->count)); - kfree(td); - } -} /* * Get the geometry associated with a dm device @@ -891,7 +851,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { - DMWARN("Start sector is beyond the geometry limits."); + DMERR("Start sector is beyond the geometry limits."); return -EINVAL; } @@ -905,150 +865,313 @@ static int __noflush_suspending(struct mapped_device *md) return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); } +static void dm_requeue_add_io(struct dm_io *io, bool first_stage) +{ + struct mapped_device *md = io->md; + + if (first_stage) { + struct dm_io *next = md->requeue_list; + + md->requeue_list = io; + io->next = next; + } else { + bio_list_add_head(&md->deferred, io->orig_bio); + } +} + +static void dm_kick_requeue(struct mapped_device *md, bool first_stage) +{ + if (first_stage) + queue_work(md->wq, &md->requeue_work); + else + queue_work(md->wq, &md->work); +} + /* - * Decrements the number of outstanding ios that a bio has been - * cloned into, completing the original io if necc. + * Return true if the dm_io's original bio is requeued. + * io->status is updated with error if requeue disallowed. 
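dm_get_table_device() above becomes a clean lookup-or-create: the whole find / open / take-reference sequence runs under md->table_devices_lock, and open_table_device() itself now allocates the entry, sets the refcount to 1 and links it into the list. The shape of that pattern as a single-threaded sketch (lock elided), with a hypothetical tdev list:

#include <stdlib.h>
#include <sys/types.h>

struct tdev {
    struct tdev *next;
    dev_t dev;
    int count;
};

static struct tdev *devices;

static struct tdev *get_table_device(dev_t dev)
{
    struct tdev *td;

    for (td = devices; td; td = td->next) {
        if (td->dev == dev) {
            td->count++;             /* refcount_inc() */
            return td;
        }
    }
    td = calloc(1, sizeof(*td));     /* open_table_device() */
    if (!td)
        return NULL;
    td->dev = dev;
    td->count = 1;                   /* refcount_set(..., 1) */
    td->next = devices;
    devices = td;                    /* list_add() */
    return td;
}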
*/ -static void dec_pending(struct dm_io *io, blk_status_t error) +static bool dm_handle_requeue(struct dm_io *io, bool first_stage) { - unsigned long flags; - blk_status_t io_error; - struct bio *bio; + struct bio *bio = io->orig_bio; + bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE); + bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) && + (bio->bi_opf & REQ_POLLED)); struct mapped_device *md = io->md; + bool requeued = false; - /* Push-back supersedes any I/O errors */ - if (unlikely(error)) { - spin_lock_irqsave(&io->endio_lock, flags); - if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) - io->status = error; - spin_unlock_irqrestore(&io->endio_lock, flags); - } + if (handle_requeue || handle_polled_eagain) { + unsigned long flags; - if (atomic_dec_and_test(&io->io_count)) { - if (io->status == BLK_STS_DM_REQUEUE) { + if (bio->bi_opf & REQ_POLLED) { /* - * Target requested pushing back the I/O. + * Upper layer won't help us poll split bio + * (io->orig_bio may only reflect a subset of the + * pre-split original) so clear REQ_POLLED. */ - spin_lock_irqsave(&md->deferred_lock, flags); - if (__noflush_suspending(md)) - /* NOTE early return due to BLK_STS_DM_REQUEUE below */ - bio_list_add_head(&md->deferred, io->orig_bio); - else - /* noflush suspend was interrupted. */ - io->status = BLK_STS_IOERR; - spin_unlock_irqrestore(&md->deferred_lock, flags); + bio_clear_polled(bio); } - io_error = io->status; - bio = io->orig_bio; - end_io_acct(io); - free_io(md, io); - - if (io_error == BLK_STS_DM_REQUEUE) - return; - - if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { + /* + * Target requested pushing back the I/O or + * polled IO hit BLK_STS_AGAIN. + */ + spin_lock_irqsave(&md->deferred_lock, flags); + if ((__noflush_suspending(md) && + !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || + handle_polled_eagain || first_stage) { + dm_requeue_add_io(io, first_stage); + requeued = true; + } else { /* - * Preflush done for flush with data, reissue - * without REQ_PREFLUSH. + * noflush suspend was interrupted or this is + * a write to a zoned target. */ - bio->bi_opf &= ~REQ_PREFLUSH; - queue_io(md, bio); - } else { - /* done with normal IO or empty flush */ - if (io_error) - bio->bi_status = io_error; - bio_endio(bio); + io->status = BLK_STS_IOERR; } + spin_unlock_irqrestore(&md->deferred_lock, flags); + } + + if (requeued) + dm_kick_requeue(md, first_stage); + + return requeued; +} + +static void __dm_io_complete(struct dm_io *io, bool first_stage) +{ + struct bio *bio = io->orig_bio; + struct mapped_device *md = io->md; + blk_status_t io_error; + bool requeued; + bool requeue_flush_with_data; + + requeued = dm_handle_requeue(io, first_stage); + if (requeued && first_stage) + return; + + io_error = io->status; + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) + dm_end_io_acct(io); + else if (!io_error) { + /* + * Must handle target that DM_MAPIO_SUBMITTED only to + * then bio_endio() rather than dm_submit_bio_remap() + */ + __dm_start_io_acct(io); + dm_end_io_acct(io); + } + requeue_flush_with_data = io->requeue_flush_with_data; + free_io(io); + smp_wmb(); + this_cpu_dec(*md->pending_io); + + /* nudge anyone waiting on suspend queue */ + if (unlikely(wq_has_sleeper(&md->wait))) + wake_up(&md->wait); + + /* Return early if the original bio was requeued */ + if (requeued) + return; + + if (unlikely(requeue_flush_with_data)) { + /* + * Preflush done for flush with data, reissue + * without REQ_PREFLUSH. 
+ */ + bio->bi_opf &= ~REQ_PREFLUSH; + queue_io(md, bio); + } else { + /* done with normal IO or empty flush */ + if (io_error) + bio->bi_status = io_error; + bio_endio(bio); + } +} + +static void dm_wq_requeue_work(struct work_struct *work) +{ + struct mapped_device *md = container_of(work, struct mapped_device, + requeue_work); + unsigned long flags; + struct dm_io *io; + + /* reuse deferred lock to simplify dm_handle_requeue */ + spin_lock_irqsave(&md->deferred_lock, flags); + io = md->requeue_list; + md->requeue_list = NULL; + spin_unlock_irqrestore(&md->deferred_lock, flags); + + while (io) { + struct dm_io *next = io->next; + + dm_io_rewind(io, &md->disk->bio_split); + + io->next = NULL; + __dm_io_complete(io, false); + io = next; + cond_resched(); + } +} + +/* + * Two staged requeue: + * + * 1) io->orig_bio points to the real original bio, and the part mapped to + * this io must be requeued, instead of other parts of the original bio. + * + * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io. + */ +static inline void dm_io_complete(struct dm_io *io) +{ + /* + * Only dm_io that has been split needs two stage requeue, otherwise + * we may run into long bio clone chain during suspend and OOM could + * be triggered. + * + * Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they + * also aren't handled via the first stage requeue. + */ + __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT)); +} + +/* + * Decrements the number of outstanding ios that a bio has been + * cloned into, completing the original io if necc. + */ +static inline void __dm_io_dec_pending(struct dm_io *io) +{ + if (atomic_dec_and_test(&io->io_count)) + dm_io_complete(io); +} + +static void dm_io_set_error(struct dm_io *io, blk_status_t error) +{ + unsigned long flags; + + /* Push-back supersedes any I/O errors */ + spin_lock_irqsave(&io->lock, flags); + if (!(io->status == BLK_STS_DM_REQUEUE && + __noflush_suspending(io->md))) { + io->status = error; } + spin_unlock_irqrestore(&io->lock, flags); } -void disable_write_same(struct mapped_device *md) +static void dm_io_dec_pending(struct dm_io *io, blk_status_t error) { - struct queue_limits *limits = dm_get_queue_limits(md); + if (unlikely(error)) + dm_io_set_error(io, error); - /* device doesn't really support WRITE SAME, disable it */ - limits->max_write_same_sectors = 0; + __dm_io_dec_pending(io); } -void disable_write_zeroes(struct mapped_device *md) +/* + * The queue_limits are only valid as long as you have a reference + * count on 'md'. But _not_ imposing verification to avoid atomic_read(), + */ +static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md) { - struct queue_limits *limits = dm_get_queue_limits(md); + return &md->queue->limits; +} - /* device doesn't really support WRITE ZEROES, disable it */ - limits->max_write_zeroes_sectors = 0; +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) +{ + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); } static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); + struct dm_target_io *tio = clone_to_tio(bio); + struct dm_target *ti = tio->ti; + dm_endio_fn endio = likely(ti != NULL) ? 
ti->type->end_io : NULL; struct dm_io *io = tio->io; - struct mapped_device *md = tio->io->md; - dm_endio_fn endio = tio->ti->type->end_io; + struct mapped_device *md = io->md; - if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { - if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) - disable_write_same(md); - if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) - disable_write_zeroes(md); + if (unlikely(error == BLK_STS_TARGET)) { + if (bio_op(bio) == REQ_OP_DISCARD && + !bdev_max_discard_sectors(bio->bi_bdev)) + blk_queue_disable_discard(md->queue); + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bdev_write_zeroes_sectors(bio->bi_bdev)) + blk_queue_disable_write_zeroes(md->queue); } + if (static_branch_unlikely(&zoned_enabled) && + unlikely(bdev_is_zoned(bio->bi_bdev))) + dm_zone_endio(io, bio); + if (endio) { - int r = endio(tio->ti, bio, &error); + int r = endio(ti, bio, &error); + switch (r) { case DM_ENDIO_REQUEUE: - error = BLK_STS_DM_REQUEUE; - /*FALLTHRU*/ + if (static_branch_unlikely(&zoned_enabled)) { + /* + * Requeuing writes to a sequential zone of a zoned + * target will break the sequential write pattern: + * fail such IO. + */ + if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) + error = BLK_STS_IOERR; + else + error = BLK_STS_DM_REQUEUE; + } else + error = BLK_STS_DM_REQUEUE; + fallthrough; case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: /* The target will handle the io */ return; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } - free_tio(tio); - dec_pending(io, error); + if (static_branch_unlikely(&swap_bios_enabled) && + likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio))) + up(&md->swap_bios_semaphore); + + free_tio(bio); + dm_io_dec_pending(io, error); } /* * Return maximum size of I/O possible at the supplied sector up to the current * target boundary. */ -static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) +static inline sector_t max_io_len_target_boundary(struct dm_target *ti, + sector_t target_offset) { - sector_t target_offset = dm_target_offset(ti, sector); - return ti->len - target_offset; } -static sector_t max_io_len(sector_t sector, struct dm_target *ti) +static sector_t __max_io_len(struct dm_target *ti, sector_t sector, + unsigned int max_granularity, + unsigned int max_sectors) { - sector_t len = max_io_len_target_boundary(sector, ti); - sector_t offset, max_len; + sector_t target_offset = dm_target_offset(ti, sector); + sector_t len = max_io_len_target_boundary(ti, target_offset); /* - * Does the target need to split even further? + * Does the target need to split IO even further? + * - varied (per target) IO splitting is a tenet of DM; this + * explains why stacked chunk_sectors based splitting via + * bio_split_to_limits() isn't possible here. */ - if (ti->max_io_len) { - offset = dm_target_offset(ti, sector); - if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) - max_len = sector_div(offset, ti->max_io_len); - else - max_len = offset & (ti->max_io_len - 1); - max_len = ti->max_io_len - max_len; - - if (len > max_len) - len = max_len; - } + if (!max_granularity) + return len; + return min_t(sector_t, len, + min(max_sectors ? 
: queue_max_sectors(ti->table->md->queue), + blk_boundary_sectors_left(target_offset, max_granularity))); +} - return len; +static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) +{ + return __max_io_len(ti, sector, ti->max_io_len, 0); } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) @@ -1060,15 +1183,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) return -EINVAL; } - /* - * BIO based queue uses its own splitting. When multipage bvecs - * is switched on, size of the incoming bio may be too big to - * be handled in some targets, such as crypt. - * - * When these targets are ready for the big bio, we can remove - * the limit. - */ - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); + ti->max_io_len = (uint32_t) len; return 0; } @@ -1086,14 +1201,15 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, return NULL; ti = dm_table_find_target(map, sector); - if (!dm_target_is_valid(ti)) + if (!ti) return NULL; return ti; } static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + unsigned long *pfn) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; @@ -1107,11 +1223,11 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, goto out; if (!ti->type->direct_access) goto out; - len = max_io_len(sector, ti) / PAGE_SECTORS; + len = max_io_len(ti, sector) / PAGE_SECTORS; if (len < 1) goto out; nr_pages = min(len, nr_pages); - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); + ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); @@ -1119,57 +1235,58 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } -static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) +static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; - long ret = 0; + int ret = -EIO; int srcu_idx; ti = dm_dax_get_live_target(md, sector, &srcu_idx); if (!ti) goto out; - if (!ti->type->dax_copy_from_iter) { - ret = copy_from_iter(addr, bytes, i); + if (WARN_ON(!ti->type->dax_zero_page_range)) { + /* + * ->zero_page_range() is mandatory dax operation. If we are + * here, something is wrong. 
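__max_io_len() above caps an IO at three things at once: the remaining length of the target, the queue's max_sectors, and the distance to the next max_granularity boundary, so a split never straddles, say, a discard granule or a zone. The boundary distance is the only non-obvious arithmetic; a sketch of what blk_boundary_sectors_left() computes, covering the power-of-two and generic cases:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t boundary_sectors_left(sector_t offset, unsigned int granularity)
{
    if ((granularity & (granularity - 1)) == 0)       /* power of two */
        return granularity - (offset & (granularity - 1));
    return granularity - (offset % granularity);      /* generic case */
}

int main(void)
{
    /* 128-sector granules: an IO at offset 100 may span at most 28 sectors */
    printf("%llu\n", (unsigned long long)boundary_sectors_left(100, 128));
    return 0;
}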
+ */ goto out; } - ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); + ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); out: dm_put_live_table(md, srcu_idx); return ret; } -static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, +static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; struct dm_target *ti; - long ret = 0; int srcu_idx; + long ret = 0; ti = dm_dax_get_live_target(md, sector, &srcu_idx); - - if (!ti) - goto out; - if (!ti->type->dax_copy_to_iter) { - ret = copy_to_iter(addr, bytes, i); + if (!ti || !ti->type->dax_recovery_write) goto out; - } - ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); - out: - dm_put_live_table(md, srcu_idx); + ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); +out: + dm_put_live_table(md, srcu_idx); return ret; } /* * A target may call dm_accept_partial_bio only from the map routine. It is - * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET. + * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management + * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated + * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced + * by __send_duplicate_bios(). * * dm_accept_partial_bio informs the dm that the target only wants to process * additional n_sectors sectors of the bio and the rest of the data should be @@ -1181,7 +1298,7 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, * +--------------------+---------------+-------+ * * <-------------- *tio->len_ptr ---------------> - * <------- bi_size -------> + * <----- bio_sectors -----> * <-- n_sectors --> * * Region 1 was already iterated over with bio_advance or similar function. @@ -1195,300 +1312,349 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, * the partially processed part (the sum of regions 1+2) must be the same for all * copies of the bio. */ -void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) -{ - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; - BUG_ON(bio->bi_opf & REQ_PREFLUSH); - BUG_ON(bi_size > *tio->len_ptr); - BUG_ON(n_sectors > bi_size); - *tio->len_ptr -= bi_size - n_sectors; +void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) +{ + struct dm_target_io *tio = clone_to_tio(bio); + struct dm_io *io = tio->io; + unsigned int bio_sectors = bio_sectors(bio); + + BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); + BUG_ON(bio_sectors > *tio->len_ptr); + BUG_ON(n_sectors > bio_sectors); + + if (static_branch_unlikely(&zoned_enabled) && + unlikely(bdev_is_zoned(bio->bi_bdev))) { + enum req_op op = bio_op(bio); + + BUG_ON(op_is_zone_mgmt(op)); + BUG_ON(op == REQ_OP_WRITE); + BUG_ON(op == REQ_OP_WRITE_ZEROES); + BUG_ON(op == REQ_OP_ZONE_APPEND); + } + + *tio->len_ptr -= bio_sectors - n_sectors; bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; + + /* + * __split_and_process_bio() may have already saved mapped part + * for accounting but it is being reduced so update accordingly. 
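dm_accept_partial_bio() above cooperates with the splitter through tio->len_ptr: the target keeps the first n_sectors of the clone, and the shared length is reduced by the rejected tail so the core resubmits exactly that remainder. A stand-alone sketch of the two updates, with hypothetical stand-in types for the bio/tio pair:

#include <assert.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

struct clone_stub {
    unsigned int bi_size;    /* bytes, like bio->bi_iter.bi_size */
    unsigned int *len_ptr;   /* sectors, shared with the splitter */
};

static void accept_partial(struct clone_stub *clone, unsigned int n_sectors)
{
    unsigned int clone_sectors = clone->bi_size >> SECTOR_SHIFT;

    assert(n_sectors <= clone_sectors);
    *clone->len_ptr -= clone_sectors - n_sectors;  /* give back the tail */
    clone->bi_size = n_sectors << SECTOR_SHIFT;    /* shrink the clone */
}

int main(void)
{
    unsigned int len = 8;                          /* sectors mapped */
    struct clone_stub clone = { 8 << SECTOR_SHIFT, &len };

    accept_partial(&clone, 3);    /* target only handles 3 sectors */
    printf("len=%u clone=%u\n", len, clone.bi_size >> SECTOR_SHIFT);
    return 0;
}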
+ */ + dm_io_set_flag(io, DM_IO_WAS_SPLIT); + io->sectors = n_sectors; + io->sector_offset = bio_sectors(io->orig_bio); } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); /* - * The zone descriptors obtained with a zone report indicate - * zone positions within the underlying device of the target. The zone - * descriptors must be remapped to match their position within the dm device. - * The caller target should obtain the zones information using - * blkdev_report_zones() to ensure that remapping for partition offset is - * already handled. + * @clone: clone bio that DM core passed to target's .map function + * @tgt_clone: clone of @clone bio that target needs submitted + * + * Targets should use this interface to submit bios they take + * ownership of when returning DM_MAPIO_SUBMITTED. + * + * Target should also enable ti->accounts_remapped_io */ -void dm_remap_zone_report(struct dm_target *ti, sector_t start, - struct blk_zone *zones, unsigned int *nr_zones) +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) { -#ifdef CONFIG_BLK_DEV_ZONED - struct blk_zone *zone; - unsigned int nrz = *nr_zones; - int i; + struct dm_target_io *tio = clone_to_tio(clone); + struct dm_io *io = tio->io; + + /* establish bio that will get submitted */ + if (!tgt_clone) + tgt_clone = clone; /* - * Remap the start sector and write pointer position of the zones in - * the array. Since we may have obtained from the target underlying - * device more zones that the target size, also adjust the number - * of zones. + * Account io->origin_bio to DM dev on behalf of target + * that took ownership of IO with DM_MAPIO_SUBMITTED. */ - for (i = 0; i < nrz; i++) { - zone = zones + i; - if (zone->start >= start + ti->len) { - memset(zone, 0, sizeof(struct blk_zone) * (nrz - i)); - break; - } + dm_start_io_acct(io, clone); - zone->start = zone->start + ti->begin - start; - if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) - continue; + trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), + tio->old_sector); + submit_bio_noacct(tgt_clone); +} +EXPORT_SYMBOL_GPL(dm_submit_bio_remap); - if (zone->cond == BLK_ZONE_COND_FULL) - zone->wp = zone->start + zone->len; - else if (zone->cond == BLK_ZONE_COND_EMPTY) - zone->wp = zone->start; - else - zone->wp = zone->wp + ti->begin - start; +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) +{ + mutex_lock(&md->swap_bios_lock); + while (latch < md->swap_bios) { + cond_resched(); + down(&md->swap_bios_semaphore); + md->swap_bios--; } - - *nr_zones = i; -#else /* !CONFIG_BLK_DEV_ZONED */ - *nr_zones = 0; -#endif + while (latch > md->swap_bios) { + cond_resched(); + up(&md->swap_bios_semaphore); + md->swap_bios++; + } + mutex_unlock(&md->swap_bios_lock); } -EXPORT_SYMBOL_GPL(dm_remap_zone_report); -static blk_qc_t __map_bio(struct dm_target_io *tio) +static void __map_bio(struct bio *clone) { - int r; - sector_t sector; - struct bio *clone = &tio->clone; + struct dm_target_io *tio = clone_to_tio(clone); + struct dm_target *ti = tio->ti; struct dm_io *io = tio->io; struct mapped_device *md = io->md; - struct dm_target *ti = tio->ti; - blk_qc_t ret = BLK_QC_T_NONE; + int r; clone->bi_end_io = clone_endio; /* - * Map the clone. If r == 0 we don't need to do - * anything, the target has assumed ownership of - * this io. + * Map the clone. 
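__set_swap_bios_limit() above resizes a live counting semaphore when the swap_bios parameter changes at runtime: shrinking means swallowing permits one by one (waiting for in-flight swap IOs to finish), growing means posting permits back. The same dance with a POSIX semaphore standing in for the kernel's md->swap_bios_semaphore:

#include <semaphore.h>
#include <stdio.h>

static sem_t swap_sem;
static int cur_limit;

static void set_limit(int latch)
{
    while (latch < cur_limit) {      /* shrink: swallow permits */
        sem_wait(&swap_sem);
        cur_limit--;
    }
    while (latch > cur_limit) {      /* grow: release permits */
        sem_post(&swap_sem);
        cur_limit++;
    }
}

int main(void)
{
    cur_limit = 8;
    sem_init(&swap_sem, 0, cur_limit);
    set_limit(2);                    /* e.g. admin lowered swap_bios */
    set_limit(16);
    printf("limit=%d\n", cur_limit);
    return 0;
}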
*/ - atomic_inc(&io->io_count); - sector = clone->bi_iter.bi_sector; + tio->old_sector = clone->bi_iter.bi_sector; + + if (static_branch_unlikely(&swap_bios_enabled) && + unlikely(swap_bios_limit(ti, clone))) { + int latch = get_swap_bios(); + + if (unlikely(latch != md->swap_bios)) + __set_swap_bios_limit(md, latch); + down(&md->swap_bios_semaphore); + } + + if (likely(ti->type->map == linear_map)) + r = linear_map(ti, clone); + else if (ti->type->map == stripe_map) + r = stripe_map(ti, clone); + else + r = ti->type->map(ti, clone); - r = ti->type->map(ti, clone); switch (r) { case DM_MAPIO_SUBMITTED: + /* target has assumed ownership of this io */ + if (!ti->accounts_remapped_io) + dm_start_io_acct(io, clone); break; case DM_MAPIO_REMAPPED: - /* the bio has been remapped so dispatch it */ - trace_block_bio_remap(clone->bi_disk->queue, clone, - bio_dev(io->orig_bio), sector); - if (md->type == DM_TYPE_NVME_BIO_BASED) - ret = direct_make_request(clone); - else - ret = generic_make_request(clone); + dm_submit_bio_remap(clone, NULL); break; case DM_MAPIO_KILL: - free_tio(tio); - dec_pending(io, BLK_STS_IOERR); - break; case DM_MAPIO_REQUEUE: - free_tio(tio); - dec_pending(io, BLK_STS_DM_REQUEUE); + if (static_branch_unlikely(&swap_bios_enabled) && + unlikely(swap_bios_limit(ti, clone))) + up(&md->swap_bios_semaphore); + free_tio(clone); + if (r == DM_MAPIO_KILL) + dm_io_dec_pending(io, BLK_STS_IOERR); + else + dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } - - return ret; -} - -static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) -{ - bio->bi_iter.bi_sector = sector; - bio->bi_iter.bi_size = to_bytes(len); } -/* - * Creates a bio that consists of range of complete bvecs. - */ -static int clone_bio(struct dm_target_io *tio, struct bio *bio, - sector_t sector, unsigned len) +static void setup_split_accounting(struct clone_info *ci, unsigned int len) { - struct bio *clone = &tio->clone; - - __bio_clone_fast(clone, bio); + struct dm_io *io = ci->io; - if (unlikely(bio_integrity(bio) != NULL)) { - int r; - - if (unlikely(!dm_target_has_integrity(tio->ti->type) && - !dm_target_passes_integrity(tio->ti->type))) { - DMWARN("%s: the target %s doesn't support integrity data.", - dm_device_name(tio->io->md), - tio->ti->type->name); - return -EIO; - } - - r = bio_integrity_clone(clone, bio, GFP_NOIO); - if (r < 0) - return r; + if (ci->sector_count > len) { + /* + * Split needed, save the mapped part for accounting. + * NOTE: dm_accept_partial_bio() will update accordingly. 
+ */ + dm_io_set_flag(io, DM_IO_WAS_SPLIT); + io->sectors = len; + io->sector_offset = bio_sectors(ci->bio); } - - bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); - clone->bi_iter.bi_size = to_bytes(len); - - if (unlikely(bio_integrity(bio) != NULL)) - bio_integrity_trim(clone); - - return 0; } static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, - struct dm_target *ti, unsigned num_bios) + struct dm_target *ti, unsigned int num_bios, + unsigned *len) { - struct dm_target_io *tio; + struct bio *bio; int try; - if (!num_bios) - return; - - if (num_bios == 1) { - tio = alloc_tio(ci, ti, 0, GFP_NOIO); - bio_list_add(blist, &tio->clone); - return; - } - for (try = 0; try < 2; try++) { int bio_nr; - struct bio *bio; - if (try) + if (try && num_bios > 1) mutex_lock(&ci->io->md->table_devices_lock); for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { - tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT); - if (!tio) + bio = alloc_tio(ci, ti, bio_nr, len, + try ? GFP_NOIO : GFP_NOWAIT); + if (!bio) break; - bio_list_add(blist, &tio->clone); + bio_list_add(blist, bio); } - if (try) + if (try && num_bios > 1) mutex_unlock(&ci->io->md->table_devices_lock); if (bio_nr == num_bios) return; - while ((bio = bio_list_pop(blist))) { - tio = container_of(bio, struct dm_target_io, clone); - free_tio(tio); - } + while ((bio = bio_list_pop(blist))) + free_tio(bio); } } -static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, - struct dm_target_io *tio, unsigned *len) +static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, + unsigned int num_bios, unsigned int *len) { - struct bio *clone = &tio->clone; + struct bio_list blist = BIO_EMPTY_LIST; + struct bio *clone; + unsigned int ret = 0; - tio->len_ptr = len; + if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */ + return 0; - __bio_clone_fast(clone, ci->bio); + /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ if (len) - bio_setup_sector(clone, ci->sector, *len); - - return __map_bio(tio); -} + setup_split_accounting(ci, *len); -static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, - unsigned num_bios, unsigned *len) -{ - struct bio_list blist = BIO_EMPTY_LIST; - struct bio *bio; - struct dm_target_io *tio; - - alloc_multiple_bios(&blist, ci, ti, num_bios); - - while ((bio = bio_list_pop(&blist))) { - tio = container_of(bio, struct dm_target_io, clone); - (void) __clone_and_map_simple_bio(ci, tio, len); + /* + * Using alloc_multiple_bios(), even if num_bios is 1, to consistently + * support allocating using GFP_NOWAIT with GFP_NOIO fallback. + */ + alloc_multiple_bios(&blist, ci, ti, num_bios, len); + while ((clone = bio_list_pop(&blist))) { + if (num_bios > 1) + dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); + __map_bio(clone); + ret += 1; } + + return ret; } -static int __send_empty_flush(struct clone_info *ci) +static void __send_empty_flush(struct clone_info *ci) { - unsigned target_nr = 0; - struct dm_target *ti; + struct dm_table *t = ci->map; + struct bio flush_bio; + blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + + if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) == + (REQ_IDLE | REQ_SYNC)) + opf |= REQ_IDLE; /* - * Empty flush uses a statically initialized bio, as the base for - * cloning. However, blkg association requires that a bdev is - * associated with a gendisk, which doesn't happen until the bdev is - * opened. 
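alloc_multiple_bios() above makes two passes: an optimistic GFP_NOWAIT pass that must not block, and, only if that cannot deliver the whole batch, a blocking GFP_NOIO pass serialized by table_devices_lock so concurrent callers do not fight over the bio_set's reserve. The skeleton of that retry, with a hypothetical try_alloc() in place of the mempool:

#include <stdbool.h>
#include <stdlib.h>

/* pretend the non-blocking pool is empty, to exercise the fallback pass */
static void *try_alloc(size_t sz, bool may_block)
{
    return may_block ? malloc(sz) : NULL;
}

static int alloc_batch(void **out, int n)
{
    for (int try = 0; try < 2; try++) {
        int i;

        for (i = 0; i < n; i++) {
            out[i] = try_alloc(64, try == 1);
            if (!out[i])
                break;
        }
        if (i == n)
            return 0;        /* full batch allocated */
        while (i--)
            free(out[i]);    /* partial batch: undo and retry blocking */
    }
    return -1;
}

int main(void)
{
    void *bios[4];

    if (alloc_batch(bios, 4))
        return 1;
    for (int i = 0; i < 4; i++)
        free(bios[i]);
    return 0;
}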
So, blkg association is done at issue time of the flush - * rather than when the device is created in alloc_dev(). + * Use an on-stack bio for this, it's safe since we don't + * need to reference it after submit. It's just used as + * the basis for the clone(s). */ - bio_set_dev(ci->bio, ci->io->md->bdev); + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); - BUG_ON(bio_has_data(ci->bio)); - while ((ti = dm_table_get_target(ci->map, target_nr++))) - __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); + ci->bio = &flush_bio; + ci->sector_count = 0; + ci->io->tio.clone.bi_iter.bi_size = 0; - bio_disassociate_blkg(ci->bio); + if (!t->flush_bypasses_map) { + for (unsigned int i = 0; i < t->num_targets; i++) { + unsigned int bios; + struct dm_target *ti = dm_table_get_target(t, i); - return 0; -} - -static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, - sector_t sector, unsigned *len) -{ - struct bio *bio = ci->bio; - struct dm_target_io *tio; - int r; + if (unlikely(ti->num_flush_bios == 0)) + continue; - tio = alloc_tio(ci, ti, 0, GFP_NOIO); - tio->len_ptr = len; - r = clone_bio(tio, bio, sector, *len); - if (r < 0) { - free_tio(tio); - return r; + atomic_add(ti->num_flush_bios, &ci->io->io_count); + bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, + NULL); + atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); + } + } else { + /* + * Note that there's no need to grab t->devices_lock here + * because the targets that support flush optimization don't + * modify the list of devices. + */ + struct list_head *devices = dm_table_get_devices(t); + unsigned int len = 0; + struct dm_dev_internal *dd; + list_for_each_entry(dd, devices, list) { + struct bio *clone; + /* + * Note that the structure dm_target_io is not + * associated with any target (because the device may be + * used by multiple targets), so we set tio->ti = NULL. + * We must check for NULL in the I/O processing path, to + * avoid NULL pointer dereference. 
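The flush fan-out above leans on the same io_count: __send_empty_flush() bumps the counter by num_flush_bios per target before issuing, gives back the references for clones that were never issued, and finally drops the extra submission-side reference from alloc_io() so the count can actually reach zero. In miniature, with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_count = 2;      /* alloc_io(): submission + completion */

static void dec_pending(void)
{
    if (atomic_fetch_sub(&io_count, 1) == 1)
        puts("all clones finished, complete the original flush");
}

int main(void)
{
    int planned = 4, issued = 3;     /* one clone could not be issued */

    atomic_fetch_add(&io_count, planned);           /* refs before issuing */
    atomic_fetch_sub(&io_count, planned - issued);  /* give back unissued */
    atomic_fetch_sub(&io_count, 1);                 /* drop submission ref */

    for (int i = 0; i < issued; i++)
        dec_pending();               /* clone_endio() per issued clone */
    dec_pending();                   /* completion-side reference */
    return 0;
}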
+ */ + clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO); + atomic_add(1, &ci->io->io_count); + bio_set_dev(clone, dd->dm_dev->bdev); + clone->bi_end_io = clone_endio; + dm_submit_bio_remap(clone, NULL); + } } - (void) __map_bio(tio); - return 0; -} - -typedef unsigned (*get_num_bios_fn)(struct dm_target *ti); + /* + * alloc_io() takes one extra reference for submission, so the + * reference won't reach 0 without the following subtraction + */ + atomic_sub(1, &ci->io->io_count); -static unsigned get_num_discard_bios(struct dm_target *ti) -{ - return ti->num_discard_bios; + bio_uninit(ci->bio); } -static unsigned get_num_secure_erase_bios(struct dm_target *ti) +static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, + unsigned int num_bios, unsigned int max_granularity, + unsigned int max_sectors) { - return ti->num_secure_erase_bios; -} + unsigned int len, bios; -static unsigned get_num_write_same_bios(struct dm_target *ti) -{ - return ti->num_write_same_bios; -} + len = min_t(sector_t, ci->sector_count, + __max_io_len(ti, ci->sector, max_granularity, max_sectors)); -static unsigned get_num_write_zeroes_bios(struct dm_target *ti) -{ - return ti->num_write_zeroes_bios; -} + atomic_add(num_bios, &ci->io->io_count); + bios = __send_duplicate_bios(ci, ti, num_bios, &len); + /* + * alloc_io() takes one extra reference for submission, so the + * reference won't reach 0 without the following (+1) subtraction + */ + atomic_sub(num_bios - bios + 1, &ci->io->io_count); -typedef bool (*is_split_required_fn)(struct dm_target *ti); + ci->sector += len; + ci->sector_count -= len; +} -static bool is_split_required_for_discard(struct dm_target *ti) +static bool is_abnormal_io(struct bio *bio) { - return ti->split_discard_bios; + switch (bio_op(bio)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_FLUSH: + return false; + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + case REQ_OP_ZONE_RESET_ALL: + return true; + default: + return false; + } } -static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, - unsigned num_bios, bool is_split_required) +static blk_status_t __process_abnormal_io(struct clone_info *ci, + struct dm_target *ti) { - unsigned len; + unsigned int num_bios = 0; + unsigned int max_granularity = 0; + unsigned int max_sectors = 0; + struct queue_limits *limits = dm_get_queue_limits(ti->table->md); + + switch (bio_op(ci->bio)) { + case REQ_OP_DISCARD: + num_bios = ti->num_discard_bios; + max_sectors = limits->max_discard_sectors; + if (ti->max_discard_granularity) + max_granularity = max_sectors; + break; + case REQ_OP_SECURE_ERASE: + num_bios = ti->num_secure_erase_bios; + max_sectors = limits->max_secure_erase_sectors; + break; + case REQ_OP_WRITE_ZEROES: + num_bios = ti->num_write_zeroes_bios; + max_sectors = limits->max_write_zeroes_sectors; + break; + default: + break; + } /* * Even though the device advertised support for this type of @@ -1496,279 +1662,496 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * * reconfiguration might also have changed that since the * check was performed. 
*/ - if (!num_bios) - return -EOPNOTSUPP; - - if (!is_split_required) - len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); - else - len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); - - __send_duplicate_bios(ci, ti, num_bios, &len); - - ci->sector += len; - ci->sector_count -= len; + if (unlikely(!num_bios)) + return BLK_STS_NOTSUPP; - return 0; -} + __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors); -static int __send_discard(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti), - is_split_required_for_discard(ti)); + return BLK_STS_OK; } -static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) +/* + * Reuse ->bi_private as dm_io list head for storing all dm_io instances + * associated with this bio, and this bio's bi_private needs to be + * stored in dm_io->data before the reuse. + * + * bio->bi_private is owned by fs or upper layer, so block layer won't + * touch it after splitting. Meantime it won't be changed by anyone after + * bio is submitted. So this reuse is safe. + */ +static inline struct dm_io **dm_poll_list_head(struct bio *bio) { - return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti), false); + return (struct dm_io **)&bio->bi_private; } -static int __send_write_same(struct clone_info *ci, struct dm_target *ti) +static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) { - return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti), false); -} + struct dm_io **head = dm_poll_list_head(bio); -static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) -{ - return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti), false); -} + if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { + bio->bi_opf |= REQ_DM_POLL_LIST; + /* + * Save .bi_private into dm_io, so that we can reuse + * .bi_private as dm_io list head for storing dm_io list + */ + io->data = bio->bi_private; -static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, - int *result) -{ - struct bio *bio = ci->bio; + /* tell block layer to poll for completion */ + bio->bi_cookie = ~BLK_QC_T_NONE; - if (bio_op(bio) == REQ_OP_DISCARD) - *result = __send_discard(ci, ti); - else if (bio_op(bio) == REQ_OP_SECURE_ERASE) - *result = __send_secure_erase(ci, ti); - else if (bio_op(bio) == REQ_OP_WRITE_SAME) - *result = __send_write_same(ci, ti); - else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) - *result = __send_write_zeroes(ci, ti); - else - return false; + io->next = NULL; + } else { + /* + * bio recursed due to split, reuse original poll list, + * and save bio->bi_private too. + */ + io->data = (*head)->data; + io->next = *head; + } - return true; + *head = io; } /* * Select the correct strategy for processing a non-flush bio. 
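dm_queue_poll_io() above is the consumer of the REQ_DM_POLL_LIST flag introduced at the top of the patch: while a polled bio is in flight its ->bi_private is not needed by the upper layer, so DM borrows it as the head of a singly linked dm_io list, stashing the original pointer in the first node so it can be restored before the bio is ended. A userspace model of that borrow, with hypothetical stub types:

#include <assert.h>
#include <stddef.h>

struct io_stub {
    struct io_stub *next;
    void *saved_private;
};

struct bio_stub {
    void *bi_private;        /* owned by the filesystem, borrowed here */
    int has_poll_list;       /* models the REQ_DM_POLL_LIST flag */
};

static void queue_poll_io(struct bio_stub *bio, struct io_stub *io)
{
    struct io_stub **head = (struct io_stub **)&bio->bi_private;

    if (!bio->has_poll_list) {
        bio->has_poll_list = 1;
        io->saved_private = bio->bi_private;   /* stash the fs pointer */
        io->next = NULL;
    } else {
        io->saved_private = (*head)->saved_private;
        io->next = *head;
    }
    *head = io;              /* push onto the borrowed list head */
}

int main(void)
{
    int fs_cookie;
    struct bio_stub bio = { .bi_private = &fs_cookie };
    struct io_stub a = { 0 }, b = { 0 };

    queue_poll_io(&bio, &a);
    queue_poll_io(&bio, &b);
    assert(bio.bi_private == &b && b.next == &a);
    assert(b.saved_private == &fs_cookie);     /* restorable at end_io */
    return 0;
}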
*/ -static int __split_and_process_non_flush(struct clone_info *ci) +static blk_status_t __split_and_process_bio(struct clone_info *ci) { + struct bio *clone; struct dm_target *ti; - unsigned len; - int r; + unsigned int len; ti = dm_table_find_target(ci->map, ci->sector); - if (!dm_target_is_valid(ti)) - return -EIO; + if (unlikely(!ti)) + return BLK_STS_IOERR; - if (unlikely(__process_abnormal_io(ci, ti, &r))) - return r; + if (unlikely(ci->is_abnormal_io)) + return __process_abnormal_io(ci, ti); + + /* + * Only support bio polling for normal IO, and the target io is + * exactly inside the dm_io instance (verified in dm_poll_dm_io) + */ + ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); - len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); + len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); + if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count) + return BLK_STS_IOERR; - r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); - if (r < 0) - return r; + setup_split_accounting(ci, len); + + if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { + if (unlikely(!dm_target_supports_nowait(ti->type))) + return BLK_STS_NOTSUPP; + + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); + if (unlikely(!clone)) + return BLK_STS_AGAIN; + } else { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + } + __map_bio(clone); ci->sector += len; ci->sector_count -= len; - return 0; + return BLK_STS_OK; } -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, - struct dm_table *map, struct bio *bio) +static void init_clone_info(struct clone_info *ci, struct dm_io *io, + struct dm_table *map, struct bio *bio, bool is_abnormal) { ci->map = map; - ci->io = alloc_io(md, bio); + ci->io = io; + ci->bio = bio; + ci->is_abnormal_io = is_abnormal; + ci->submit_as_polled = false; ci->sector = bio->bi_iter.bi_sector; + ci->sector_count = bio_sectors(bio); + + /* Shouldn't happen but sector_count was being set to 0 so... */ + if (static_branch_unlikely(&zoned_enabled) && + WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) + ci->sector_count = 0; } -/* - * Entry point to split a bio into clones and submit them to the targets. - */ -static blk_qc_t __split_and_process_bio(struct mapped_device *md, - struct dm_table *map, struct bio *bio) +#ifdef CONFIG_BLK_DEV_ZONED +static inline bool dm_zone_bio_needs_split(struct bio *bio) { - struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; - int error = 0; + /* + * Special case the zone operations that cannot or should not be split. + */ + switch (bio_op(bio)) { + case REQ_OP_ZONE_APPEND: + case REQ_OP_ZONE_FINISH: + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: + return false; + default: + break; + } - if (unlikely(!map)) { - bio_io_error(bio); - return ret; + /* + * When mapped devices use the block layer zone write plugging, we must + * split any large BIO to the mapped device limits to not submit BIOs + * that span zone boundaries and to avoid potential deadlocks with + * queue freeze operations. 
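+ *
+ * (The split itself is performed by bio_split_to_limits() in
+ * dm_split_and_process_bio() before the bio is plugged or mapped.)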
+ */ + return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio); +} + +static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) +{ + if (!bio_needs_zone_write_plugging(bio)) + return false; + return blk_zone_plug_bio(bio, 0); +} + +static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci, + struct dm_target *ti) +{ + struct bio_list blist = BIO_EMPTY_LIST; + struct mapped_device *md = ci->io->md; + unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; + unsigned long *need_reset; + unsigned int i, nr_zones, nr_reset; + unsigned int num_bios = 0; + blk_status_t sts = BLK_STS_OK; + sector_t sector = ti->begin; + struct bio *clone; + int ret; + + nr_zones = ti->len >> ilog2(zone_sectors); + need_reset = bitmap_zalloc(nr_zones, GFP_NOIO); + if (!need_reset) + return BLK_STS_RESOURCE; + + ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, + nr_zones, need_reset); + if (ret) { + sts = BLK_STS_IOERR; + goto free_bitmap; } - blk_queue_split(md->queue, &bio); + /* If we have no zone to reset, we are done. */ + nr_reset = bitmap_weight(need_reset, nr_zones); + if (!nr_reset) + goto free_bitmap; - init_clone_info(&ci, md, map, bio); + atomic_add(nr_zones, &ci->io->io_count); - if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; + for (i = 0; i < nr_zones; i++) { - /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. It's just used as - * the basis for the clone(s). - */ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; - error = __send_empty_flush(&ci); - /* dec_pending submits any data associated with flush */ - } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { - ci.bio = bio; - ci.sector_count = 0; - error = __split_and_process_non_flush(&ci); - } else { - ci.bio = bio; - ci.sector_count = bio_sectors(bio); - while (ci.sector_count && !error) { - error = __split_and_process_non_flush(&ci); - if (current->bio_list && ci.sector_count && !error) { - /* - * Remainder must be passed to generic_make_request() - * so that it gets handled *after* bios already submitted - * have been completely processed. - * We take a clone of the original to store in - * ci.io->orig_bio to be used by end_io_acct() and - * for dec_pending to use for completion handling. - */ - struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, - GFP_NOIO, &md->queue->bio_split); - ci.io->orig_bio = b; - bio_chain(b, bio); - ret = generic_make_request(bio); - break; - } + if (!test_bit(i, need_reset)) { + sector += zone_sectors; + continue; + } + + if (bio_list_empty(&blist)) { + /* This may take a while, so be nice to others */ + if (num_bios) + cond_resched(); + + /* + * We may need to reset thousands of zones, so let's + * not go crazy with the clone allocation. + */ + alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32), + NULL); } + + /* Get a clone and change it to a regular reset operation. 
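+ * The clone inherited REQ_OP_ZONE_RESET_ALL from the original bio, so
+ * the op bits are rewritten to a single-zone REQ_OP_ZONE_RESET aimed
+ * at one zone's start sector with a zero-length payload.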
*/ + clone = bio_list_pop(&blist); + clone->bi_opf &= ~REQ_OP_MASK; + clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; + clone->bi_iter.bi_sector = sector; + clone->bi_iter.bi_size = 0; + __map_bio(clone); + + sector += zone_sectors; + num_bios++; + nr_reset--; } - /* drop the extra reference count */ - dec_pending(ci.io, errno_to_blk_status(error)); - return ret; + WARN_ON_ONCE(!bio_list_empty(&blist)); + atomic_sub(nr_zones - num_bios, &ci->io->io_count); + ci->sector_count = 0; + +free_bitmap: + bitmap_free(need_reset); + + return sts; +} + +static void __send_zone_reset_all_native(struct clone_info *ci, + struct dm_target *ti) +{ + unsigned int bios; + + atomic_add(1, &ci->io->io_count); + bios = __send_duplicate_bios(ci, ti, 1, NULL); + atomic_sub(1 - bios, &ci->io->io_count); + + ci->sector_count = 0; +} + +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + struct dm_table *t = ci->map; + blk_status_t sts = BLK_STS_OK; + + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (ti->zone_reset_all_supported) { + __send_zone_reset_all_native(ci, ti); + continue; + } + + sts = __send_zone_reset_all_emulated(ci, ti); + if (sts != BLK_STS_OK) + break; + } + + /* Release the reference that alloc_io() took for submission. */ + atomic_sub(1, &ci->io->io_count); + + return sts; +} + +#else +static inline bool dm_zone_bio_needs_split(struct bio *bio) +{ + return false; +} +static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) +{ + return false; } +static blk_status_t __send_zone_reset_all(struct clone_info *ci) +{ + return BLK_STS_NOTSUPP; +} +#endif /* - * Optimized variant of __split_and_process_bio that leverages the - * fact that targets that use it do _not_ have a need to split bios. + * Entry point to split a bio into clones and submit them to the targets. */ -static blk_qc_t __process_bio(struct mapped_device *md, - struct dm_table *map, struct bio *bio) +static void dm_split_and_process_bio(struct mapped_device *md, + struct dm_table *map, struct bio *bio) { struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; - int error = 0; + struct dm_io *io; + blk_status_t error = BLK_STS_OK; + bool is_abnormal, need_split; - if (unlikely(!map)) { - bio_io_error(bio); - return ret; + is_abnormal = is_abnormal_io(bio); + if (static_branch_unlikely(&zoned_enabled)) { + need_split = is_abnormal || dm_zone_bio_needs_split(bio); + } else { + need_split = is_abnormal; } - init_clone_info(&ci, md, map, bio); + if (unlikely(need_split)) { + /* + * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc) + * otherwise associated queue_limits won't be imposed. + * Also split the BIO for mapped devices needing zone append + * emulation to ensure that the BIO does not cross zone + * boundaries. + */ + bio = bio_split_to_limits(bio); + if (!bio) + return; + } - if (bio->bi_opf & REQ_PREFLUSH) { - struct bio flush_bio; + /* + * Use the block layer zone write plugging for mapped devices that + * need zone append emulation (e.g. dm-crypt). + */ + if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio)) + return; + /* Only support nowait for normal IO */ + if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) { /* - * Use an on-stack bio for this, it's safe since we don't - * need to reference it after submit. It's just used as - * the basis for the clone(s). + * Don't support NOWAIT for FLUSH because it may allocate + * multiple bios and there's no easy way how to undo the + * allocations. 
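+ *
+ * (A partial GFP_NOWAIT failure halfway through a multi-bio flush
+ * could not be rolled back cleanly, so the bio is failed up front
+ * with bio_wouldblock_error() instead.)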
*/ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - ci.bio = &flush_bio; - ci.sector_count = 0; - error = __send_empty_flush(&ci); - /* dec_pending submits any data associated with flush */ + if (bio->bi_opf & REQ_PREFLUSH) { + bio_wouldblock_error(bio); + return; + } + io = alloc_io(md, bio, GFP_NOWAIT); + if (unlikely(!io)) { + /* Unable to do anything without dm_io. */ + bio_wouldblock_error(bio); + return; + } } else { - struct dm_target *ti = md->immutable_target; - struct dm_target_io *tio; + io = alloc_io(md, bio, GFP_NOIO); + } + init_clone_info(&ci, io, map, bio, is_abnormal); + if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) { /* - * Defend against IO still getting in during teardown - * - as was seen for a time with nvme-fcloop + * The "flush_bypasses_map" is set on targets where it is safe + * to skip the map function and submit bios directly to the + * underlying block devices - currently, it is set for dm-linear + * and dm-stripe. + * + * If we have just one underlying device (i.e. there is one + * linear target or multiple linear targets pointing to the same + * device), we can send the flush with data directly to it. */ - if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) { - error = -EIO; - goto out; + if (bio->bi_iter.bi_size && map->flush_bypasses_map) { + struct list_head *devices = dm_table_get_devices(map); + if (devices->next == devices->prev) + goto send_preflush_with_data; } + if (bio->bi_iter.bi_size) + io->requeue_flush_with_data = true; + __send_empty_flush(&ci); + /* dm_io_complete submits any data associated with flush */ + goto out; + } - ci.bio = bio; - ci.sector_count = bio_sectors(bio); - if (unlikely(__process_abnormal_io(&ci, ti, &error))) - goto out; - - tio = alloc_tio(&ci, ti, 0, GFP_NOIO); - ret = __clone_and_map_simple_bio(&ci, tio, NULL); +send_preflush_with_data: + if (static_branch_unlikely(&zoned_enabled) && + (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { + error = __send_zone_reset_all(&ci); + goto out; } + + error = __split_and_process_bio(&ci); + if (error || !ci.sector_count) + goto out; + /* + * Remainder must be passed to submit_bio_noacct() so it gets handled + * *after* bios already submitted have been completely processed. + */ + bio_trim(bio, io->sectors, ci.sector_count); + trace_block_split(bio, bio->bi_iter.bi_sector); + bio_inc_remaining(bio); + submit_bio_noacct(bio); out: - /* drop the extra reference count */ - dec_pending(ci.io, errno_to_blk_status(error)); - return ret; + /* + * Drop the extra reference count for non-POLLED bio, and hold one + * reference for POLLED bio, which will be released in dm_poll_bio + * + * Add every dm_io instance into the dm_io list head which is stored + * in bio->bi_private, so that dm_poll_bio can poll them all. 
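+ *
+ * The ->bi_private reuse is an intrusive singly linked list threaded
+ * through a borrowed pointer field. A minimal sketch in plain C
+ * (hypothetical types; the bool stands in for the REQ_DM_POLL_LIST check):
+ *
+ *	struct io {
+ *		void *saved_private;	// owner's original ->bi_private
+ *		struct io *next;
+ *	};
+ *
+ *	void poll_list_push(struct io **slot, struct io *io, bool first)
+ *	{
+ *		if (first) {		// save the owner's pointer once
+ *			io->saved_private = (void *)*slot;
+ *			io->next = NULL;
+ *		} else {		// inherit it from the current head
+ *			io->saved_private = (*slot)->saved_private;
+ *			io->next = *slot;
+ *		}
+ *		*slot = io;	// slot is (struct io **)&bio->bi_private
+ *	}
+ *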
+ */ + if (error || !ci.submit_as_polled) { + /* + * In case of submission failure, the extra reference for + * submitting io isn't consumed yet + */ + if (error) + atomic_dec(&io->io_count); + dm_io_dec_pending(io, error); + } else + dm_queue_poll_io(bio, io); } -static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) +static void dm_submit_bio(struct bio *bio) { - struct mapped_device *md = q->queuedata; - blk_qc_t ret = BLK_QC_T_NONE; + struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; + } - /* if we're suspended, we have to queue this io for later */ + /* If suspended, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { - dm_put_live_table(md, srcu_idx); - - if (!(bio->bi_opf & REQ_RAHEAD)) - queue_io(md, bio); - else + if (bio->bi_opf & REQ_NOWAIT) + bio_wouldblock_error(bio); + else if (bio->bi_opf & REQ_RAHEAD) bio_io_error(bio); - return ret; + else + queue_io(md, bio); + goto out; } - if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - ret = __process_bio(md, map, bio); - else - ret = __split_and_process_bio(md, map, bio); - + dm_split_and_process_bio(md, map, bio); +out: dm_put_live_table(md, srcu_idx); - return ret; } -static int dm_any_congested(void *congested_data, int bdi_bits) +static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, + unsigned int flags) { - int r = bdi_bits; - struct mapped_device *md = congested_data; - struct dm_table *map; + WARN_ON_ONCE(!dm_tio_is_normal(&io->tio)); + + /* don't poll if the mapped io is done */ + if (atomic_read(&io->io_count) > 1) + bio_poll(&io->tio.clone, iob, flags); + + /* bio_poll holds the last reference */ + return atomic_read(&io->io_count) == 1; +} + +static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, + unsigned int flags) +{ + struct dm_io **head = dm_poll_list_head(bio); + struct dm_io *list = *head; + struct dm_io *tmp = NULL; + struct dm_io *curr, *next; + + /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */ + if (!(bio->bi_opf & REQ_DM_POLL_LIST)) + return 0; + + WARN_ON_ONCE(!list); - if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - if (dm_request_based(md)) { + /* + * Restore .bi_private before possibly completing dm_io. + * + * bio_poll() is only possible once @bio has been completely + * submitted via submit_bio_noacct()'s depth-first submission. + * So there is no dm_queue_poll_io() race associated with + * clearing REQ_DM_POLL_LIST here. + */ + bio->bi_opf &= ~REQ_DM_POLL_LIST; + bio->bi_private = list->data; + + for (curr = list, next = curr->next; curr; curr = next, next = + curr ? curr->next : NULL) { + if (dm_poll_dm_io(curr, iob, flags)) { /* - * With request-based DM we only need to check the - * top-level queue for congestion. + * clone_endio() has already occurred, so no + * error handling is needed here. */ - r = md->queue->backing_dev_info->wb.state & bdi_bits; + __dm_io_dec_pending(curr); } else { - map = dm_get_live_table_fast(md); - if (map) - r = dm_table_any_congested(map, bdi_bits); - dm_put_live_table_fast(md); + curr->next = tmp; + tmp = curr; } } - return r; + /* Not done? 
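+ * Some dm_io instances are still in flight: re-mark the bio and park
+ * the remaining list back in ->bi_private so the next bio_poll() call
+ * finds it again.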
*/ + if (tmp) { + bio->bi_opf |= REQ_DM_POLL_LIST; + /* Reset bio->bi_private to dm_io list head */ + *head = tmp; + return 0; + } + return 1; } -/*----------------------------------------------------------------- +/* + *--------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. - *---------------------------------------------------------------*/ + *--------------------------------------------------------------- + */ static void free_minor(int minor) { spin_lock(&_minor_lock); @@ -1816,26 +2199,32 @@ static int next_free_minor(int *minor) } static const struct block_device_operations dm_blk_dops; +static const struct block_device_operations dm_rq_blk_dops; static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); -static void dm_init_normal_md_queue(struct mapped_device *md) +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +static void dm_queue_destroy_crypto_profile(struct request_queue *q) +{ + dm_destroy_crypto_profile(q->crypto_profile); +} + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) { - /* - * Initialize aspects of queue that aren't relevant for blk-mq - */ - md->queue->backing_dev_info->congested_fn = dm_any_congested; } +#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); + dm_free_md_mempools(md->mempools); if (md->dax_dev) { + dax_remove_host(md->disk); kill_dax(md->dax_dev); put_dax(md->dax_dev); md->dax_dev = NULL; @@ -1845,23 +2234,38 @@ static void cleanup_mapped_device(struct mapped_device *md) spin_lock(&_minor_lock); md->disk->private_data = NULL; spin_unlock(&_minor_lock); - del_gendisk(md->disk); + if (dm_get_md_type(md) != DM_TYPE_NONE) { + struct table_device *td; + + dm_sysfs_exit(md); + list_for_each_entry(td, &md->table_devices, list) { + bd_unlink_disk_holder(td->dm_dev.bdev, + md->disk); + } + + /* + * Hold lock to make sure del_gendisk() won't concurrent + * with open/close_table_device(). 
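+ *
+ * (dm_setup_md_queue() takes the same lock around add_disk() for the
+ * same reason.)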
+ */ + mutex_lock(&md->table_devices_lock); + del_gendisk(md->disk); + mutex_unlock(&md->table_devices_lock); + } + dm_queue_destroy_crypto_profile(md->queue); put_disk(md->disk); } - if (md->queue) - blk_cleanup_queue(md->queue); + if (md->pending_io) { + free_percpu(md->pending_io); + md->pending_io = NULL; + } cleanup_srcu_struct(&md->io_barrier); - if (md->bdev) { - bdput(md->bdev); - md->bdev = NULL; - } - mutex_destroy(&md->suspend_lock); mutex_destroy(&md->type_lock); mutex_destroy(&md->table_devices_lock); + mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); } @@ -1872,13 +2276,13 @@ static void cleanup_mapped_device(struct mapped_device *md) static struct mapped_device *alloc_dev(int minor) { int r, numa_node_id = dm_get_numa_node(); - struct dax_device *dax_dev = NULL; + struct dax_device *dax_dev; struct mapped_device *md; void *old_md; md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { - DMWARN("unable to allocate device, out of memory."); + DMERR("unable to allocate device, out of memory."); return NULL; } @@ -1912,47 +2316,62 @@ static struct mapped_device *alloc_dev(int minor) INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); - md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); - if (!md->queue) - goto bad; - md->queue->queuedata = md; - md->queue->backing_dev_info->congested_data = md; - - md->disk = alloc_disk_node(1, md->numa_node_id); - if (!md->disk) + /* + * default to bio-based until DM table is loaded and md->type + * established. If request-based table is loaded: blk-mq will + * override accordingly. + */ + md->disk = blk_alloc_disk(NULL, md->numa_node_id); + if (IS_ERR(md->disk)) { + md->disk = NULL; goto bad; + } + md->queue = md->disk->queue; init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); + INIT_WORK(&md->requeue_work, dm_wq_requeue_work); init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); + md->requeue_list = NULL; + md->swap_bios = get_swap_bios(); + sema_init(&md->swap_bios_semaphore, md->swap_bios); + mutex_init(&md->swap_bios_lock); + md->disk->major = _major; md->disk->first_minor = minor; + md->disk->minors = 1; + md->disk->flags |= GENHD_FL_NO_PART; md->disk->fops = &dm_blk_dops; - md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); - if (IS_ENABLED(CONFIG_DAX_DRIVER)) { - dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); - if (!dax_dev) + dax_dev = alloc_dax(md, &dm_dax_ops); + if (IS_ERR(dax_dev)) { + if (PTR_ERR(dax_dev) != -EOPNOTSUPP) + goto bad; + } else { + set_dax_nocache(dax_dev); + set_dax_nomc(dax_dev); + md->dax_dev = dax_dev; + if (dax_add_host(dax_dev, md->disk)) goto bad; } - md->dax_dev = dax_dev; - add_disk_no_queue_reg(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); - md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); + md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); if (!md->wq) goto bad; - md->bdev = bdget_disk(md->disk, 0); - if (!md->bdev) + md->pending_io = alloc_percpu(unsigned long); + if (!md->pending_io) goto bad; - dm_stats_init(&md->stats); + r = dm_stats_init(&md->stats); + if (r < 0) + goto bad; /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); @@ -1984,7 +2403,7 @@ static void free_dev(struct mapped_device *md) cleanup_mapped_device(md); - free_table_devices(&md->table_devices); + WARN_ON_ONCE(!list_empty(&md->table_devices)); dm_stats_cleanup(&md->stats); free_minor(minor); @@ -1992,48 
+2411,6 @@ static void free_dev(struct mapped_device *md) kvfree(md); } -static int __bind_mempools(struct mapped_device *md, struct dm_table *t) -{ - struct dm_md_mempools *p = dm_table_get_md_mempools(t); - int ret = 0; - - if (dm_table_bio_based(t)) { - /* - * The md may already have mempools that need changing. - * If so, reload bioset because front_pad may have changed - * because a different table was loaded. - */ - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); - - } else if (bioset_initialized(&md->bs)) { - /* - * There's no need to reload with request-based dm - * because the size of front_pad doesn't change. - * Note for future: If you are to reload bioset, - * prep-ed requests in the queue may refer - * to bio from the old bioset, so you must walk - * through the queue to unprep. - */ - goto out; - } - - BUG_ON(!p || - bioset_initialized(&md->bs) || - bioset_initialized(&md->io_bs)); - - ret = bioset_init_from_src(&md->bs, &p->bs); - if (ret) - goto out; - ret = bioset_init_from_src(&md->io_bs, &p->io_bs); - if (ret) - bioset_exit(&md->bs); -out: - /* mempool bind completed, no longer need any mempools in the table */ - dm_table_free_md_mempools(t); - return ret; -} - /* * Bind a table to the device. */ @@ -2041,7 +2418,7 @@ static void event_callback(void *context) { unsigned long flags; LIST_HEAD(uevents); - struct mapped_device *md = (struct mapped_device *) context; + struct mapped_device *md = context; spin_lock_irqsave(&md->uevent_lock, flags); list_splice_init(&md->uevent_list, &uevents); @@ -2055,77 +2432,79 @@ static void event_callback(void *context) } /* - * Protected by md->suspend_lock obtained by dm_swap_table(). - */ -static void __set_size(struct mapped_device *md, sector_t size) -{ - lockdep_assert_held(&md->suspend_lock); - - set_capacity(md->disk, size); - - i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); -} - -/* * Returns old map, which caller must destroy. */ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) { struct dm_table *old_map; - struct request_queue *q = md->queue; - bool request_based = dm_table_request_based(t); - sector_t size; + sector_t size, old_size; int ret; lockdep_assert_held(&md->suspend_lock); size = dm_table_get_size(t); + old_size = dm_get_size(md); + + if (!dm_table_supports_size_change(t, old_size, size)) { + old_map = ERR_PTR(-EINVAL); + goto out; + } + + set_capacity(md->disk, size); + + ret = dm_table_set_restrictions(t, md->queue, limits); + if (ret) { + set_capacity(md->disk, old_size); + old_map = ERR_PTR(ret); + goto out; + } + /* * Wipe any geometry if the size of the table changed. */ - if (size != dm_get_size(md)) + if (size != old_size) memset(&md->geometry, 0, sizeof(md->geometry)); - __set_size(md, size); - dm_table_event_callback(t, event_callback, md); - /* - * The queue hasn't been stopped yet, if the old table type wasn't - * for request-based during suspension. So stop it to prevent - * I/O mapping before resume. - * This must be done before setting the queue restrictions, - * because request-based dm may be run just after the setting. - */ - if (request_based) - dm_stop_queue(q); - - if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { + if (dm_table_request_based(t)) { /* - * Leverage the fact that request-based DM targets and - * NVMe bio based targets are immutable singletons - * - used to optimize both dm_request_fn and dm_mq_queue_rq; - * and __process_bio. 
+ * Leverage the fact that request-based DM targets are + * immutable singletons - used to optimize dm_mq_queue_rq. */ md->immutable_target = dm_table_get_immutable_target(t); - } - ret = __bind_mempools(md, t); - if (ret) { - old_map = ERR_PTR(ret); - goto out; + /* + * There is no need to reload with request-based dm because the + * size of front_pad doesn't change. + * + * Note for future: If you are to reload bioset, prep-ed + * requests in the queue may refer to bio from the old bioset, + * so you must walk through the queue to unprep. + */ + if (!md->mempools) + md->mempools = t->mempools; + else + dm_free_md_mempools(t->mempools); + } else { + /* + * The md may already have mempools that need changing. + * If so, reload bioset because front_pad may have changed + * because a different table was loaded. + */ + dm_free_md_mempools(md->mempools); + md->mempools = t->mempools; } + t->mempools = NULL; old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); md->immutable_target_type = dm_table_get_immutable_target_type(t); - dm_table_set_restrictions(t, q, limits); if (old_map) dm_sync_table(md); - out: return old_map; } @@ -2152,18 +2531,13 @@ static struct dm_table *__unbind(struct mapped_device *md) */ int dm_create(int minor, struct mapped_device **result) { - int r; struct mapped_device *md; md = alloc_dev(minor); if (!md) return -ENXIO; - r = dm_sysfs_init(md); - if (r) { - free_dev(md); - return r; - } + dm_ima_reset_data(md); *result = md; return 0; @@ -2183,12 +2557,6 @@ void dm_unlock_md_type(struct mapped_device *md) mutex_unlock(&md->type_lock); } -void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) -{ - BUG_ON(!mutex_is_locked(&md->type_lock)); - md->type = type; -} - enum dm_queue_mode dm_get_md_type(struct mapped_device *md) { return md->type; @@ -2200,42 +2568,24 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md) } /* - * The queue_limits are only valid as long as you have a reference - * count on 'md'. 
- */ -struct queue_limits *dm_get_queue_limits(struct mapped_device *md) -{ - BUG_ON(!atomic_read(&md->holders)); - return &md->queue->limits; -} -EXPORT_SYMBOL_GPL(dm_get_queue_limits); - -/* * Setup the DM device's queue based on md's type */ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) { - int r; + enum dm_queue_mode type = dm_table_get_type(t); struct queue_limits limits; - enum dm_queue_mode type = dm_get_md_type(md); + struct table_device *td; + int r; - switch (type) { - case DM_TYPE_REQUEST_BASED: + WARN_ON_ONCE(type == DM_TYPE_NONE); + + if (type == DM_TYPE_REQUEST_BASED) { + md->disk->fops = &dm_rq_blk_dops; r = dm_mq_init_request_queue(md, t); if (r) { - DMERR("Cannot initialize queue for request-based dm-mq mapped device"); + DMERR("Cannot initialize queue for request-based dm mapped device"); return r; } - break; - case DM_TYPE_BIO_BASED: - case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: - dm_init_normal_md_queue(md); - blk_queue_make_request(md->queue, dm_make_request); - break; - case DM_TYPE_NONE: - WARN_ON_ONCE(true); - break; } r = dm_calculate_queue_limits(t, &limits); @@ -2243,16 +2593,50 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) DMERR("Cannot calculate initial queue limits"); return r; } - dm_table_set_restrictions(t, md->queue, &limits); - blk_register_queue(md->disk); + r = dm_table_set_restrictions(t, md->queue, &limits); + if (r) + return r; + /* + * Hold lock to make sure add_disk() and del_gendisk() won't concurrent + * with open_table_device() and close_table_device(). + */ + mutex_lock(&md->table_devices_lock); + r = add_disk(md->disk); + mutex_unlock(&md->table_devices_lock); + if (r) + return r; + + /* + * Register the holder relationship for devices added before the disk + * was live. 
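+ * If linking fails part way through, the holders registered so far
+ * are unlinked again in reverse order (out_undo_holders below) and
+ * the disk is removed with del_gendisk().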
+ */ + list_for_each_entry(td, &md->table_devices, list) { + r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); + if (r) + goto out_undo_holders; + } + + r = dm_sysfs_init(md); + if (r) + goto out_undo_holders; + + md->type = type; return 0; + +out_undo_holders: + list_for_each_entry_continue_reverse(td, &md->table_devices, list) + bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); + mutex_lock(&md->table_devices_lock); + del_gendisk(md->disk); + mutex_unlock(&md->table_devices_lock); + return r; } struct mapped_device *dm_get_md(dev_t dev) { struct mapped_device *md; - unsigned minor = MINOR(dev); + unsigned int minor = MINOR(dev); if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) return NULL; @@ -2320,7 +2704,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - blk_set_queue_dying(md->queue); + blk_mark_disk_dead(md->disk); /* * Take suspend_lock so that presuspend and postsuspend methods @@ -2330,9 +2714,11 @@ static void __dm_destroy(struct mapped_device *md, bool wait) map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); + set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } - /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ + /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); mutex_unlock(&md->suspend_lock); @@ -2344,12 +2730,11 @@ static void __dm_destroy(struct mapped_device *md, bool wait) */ if (wait) while (atomic_read(&md->holders)) - msleep(1); + fsleep(1000); else if (atomic_read(&md->holders)) DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", dm_device_name(md), atomic_read(&md->holders)); - dm_sysfs_exit(md); dm_table_destroy(__unbind(md)); free_dev(md); } @@ -2370,19 +2755,30 @@ void dm_put(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_put); -static int dm_wait_for_completion(struct mapped_device *md, long task_state) +static bool dm_in_flight_bios(struct mapped_device *md) +{ + int cpu; + unsigned long sum = 0; + + for_each_possible_cpu(cpu) + sum += *per_cpu_ptr(md->pending_io, cpu); + + return sum != 0; +} + +static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) { int r = 0; DEFINE_WAIT(wait); - while (1) { + while (true) { prepare_to_wait(&md->wait, &wait, task_state); - if (!md_in_flight(md)) + if (!dm_in_flight_bios(md)) break; if (signal_pending_state(task_state, current)) { - r = -EINTR; + r = -ERESTARTSYS; break; } @@ -2390,6 +2786,30 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) } finish_wait(&md->wait, &wait); + smp_rmb(); + + return r; +} + +static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) +{ + int r = 0; + + if (!queue_is_mq(md->queue)) + return dm_wait_for_bios_completion(md, task_state); + + while (true) { + if (!blk_mq_queue_inflight(md->queue)) + break; + + if (signal_pending_state(task_state, current)) { + r = -ERESTARTSYS; + break; + } + + fsleep(5000); + } + return r; } @@ -2398,29 +2818,20 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) */ static void dm_wq_work(struct work_struct *work) { - struct mapped_device *md = container_of(work, struct mapped_device, - work); - struct bio *c; - int srcu_idx; - struct dm_table *map; - - map = dm_get_live_table(md, &srcu_idx); + struct mapped_device *md = container_of(work, 
struct mapped_device, work); + struct bio *bio; while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); - c = bio_list_pop(&md->deferred); + bio = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); - if (!c) + if (!bio) break; - if (dm_request_based(md)) - generic_make_request(c); - else - __split_and_process_bio(md, map, c); + submit_bio_noacct(bio); + cond_resched(); } - - dm_put_live_table(md, srcu_idx); } static void dm_queue_flush(struct mapped_device *md) @@ -2482,27 +2893,19 @@ static int lock_fs(struct mapped_device *md) { int r; - WARN_ON(md->frozen_sb); - - md->frozen_sb = freeze_bdev(md->bdev); - if (IS_ERR(md->frozen_sb)) { - r = PTR_ERR(md->frozen_sb); - md->frozen_sb = NULL; - return r; - } - - set_bit(DMF_FROZEN, &md->flags); + WARN_ON(test_bit(DMF_FROZEN, &md->flags)); - return 0; + r = bdev_freeze(md->disk->part0); + if (!r) + set_bit(DMF_FROZEN, &md->flags); + return r; } static void unlock_fs(struct mapped_device *md) { if (!test_bit(DMF_FROZEN, &md->flags)) return; - - thaw_bdev(md->bdev, md->frozen_sb); - md->frozen_sb = NULL; + bdev_thaw(md->disk->part0); clear_bit(DMF_FROZEN, &md->flags); } @@ -2516,12 +2919,12 @@ static void unlock_fs(struct mapped_device *md) * are being added to md->deferred list. */ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, - unsigned suspend_flags, long task_state, + unsigned int suspend_flags, unsigned int task_state, int dmf_suspended_flag) { bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; - int r; + int r = 0; lockdep_assert_held(&md->suspend_lock); @@ -2532,7 +2935,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (noflush) set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); else - pr_debug("%s: suspending with flush\n", dm_device_name(md)); + DMDEBUG("%s: suspending with flush", dm_device_name(md)); /* * This gets reverted if there's an error later and the targets @@ -2557,13 +2960,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, /* * Here we must make sure that no processes are submitting requests * to target drivers i.e. no one may be executing - * __split_and_process_bio. This is called from dm_request and - * dm_wq_work. + * dm_split_and_process_bio from dm_submit_bio. * - * To get all processes out of __split_and_process_bio in dm_request, + * To get all processes out of dm_split_and_process_bio in dm_submit_bio, * we take the write lock. To prevent any process from reentering - * __split_and_process_bio from dm_request and quiesce the thread - * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call + * dm_split_and_process_bio from dm_submit_bio and quiesce the thread + * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call * flush_workqueue(md->wq). */ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); @@ -2574,8 +2976,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * Stop md->queue before flushing md->wq in case request-based * dm defers requests to md->wq from md->queue. */ - if (dm_request_based(md)) + if (map && dm_request_based(md)) { dm_stop_queue(md->queue); + set_bit(DMF_QUEUE_STOPPED, &md->flags); + } flush_workqueue(md->wq); @@ -2584,7 +2988,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * We call dm_wait_for_completion to wait for all existing requests * to finish. 
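 *
 * dm_in_flight_bios() above sums md->pending_io across CPUs instead of
 * bouncing one shared atomic counter between them. A minimal sketch of
 * the idea in plain C (fixed CPU count, hypothetical names):
 *
 *	#define NR_CPUS 8
 *	static unsigned long pending_io[NR_CPUS];	// one slot per CPU
 *
 *	static bool in_flight(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for (cpu = 0; cpu < NR_CPUS; cpu++)
 *			sum += pending_io[cpu];	// readers pay, writers stay local
 *		return sum != 0;
 *	}
 *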
*/ - r = dm_wait_for_completion(md, task_state); + if (map) + r = dm_wait_for_completion(md, task_state); if (!r) set_bit(dmf_suspended_flag, &md->flags); @@ -2597,7 +3002,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (r < 0) { dm_queue_flush(md); - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); @@ -2624,7 +3029,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, * * To abort suspend, start the request_queue. */ -int dm_suspend(struct mapped_device *md, unsigned suspend_flags) +int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) { struct dm_table *map = NULL; int r = 0; @@ -2647,12 +3052,18 @@ retry: } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + if (!map) { + /* avoid deadlock with fs/namespace.c:do_mount() */ + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + } r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2663,6 +3074,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) { if (map) { int r = dm_table_resume_targets(map); + if (r) return r; } @@ -2674,7 +3086,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) * so that mapping of targets can work correctly. * Request-based dm is queueing the deferred I/Os in its request_queue. */ - if (dm_request_based(md)) + if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) dm_start_queue(md->queue); unlock_fs(md); @@ -2724,7 +3136,7 @@ out: * It may be used only from the kernel. */ -static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) +static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) { struct dm_table *map = NULL; @@ -2749,11 +3161,16 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static void __dm_internal_resume(struct mapped_device *md) { + int r; + struct dm_table *map; + BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) @@ -2762,12 +3179,23 @@ static void __dm_internal_resume(struct mapped_device *md) if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ - /* - * NOTE: existing callers don't need to call dm_table_resume_targets - * (which may fail -- so best to avoid it for now by passing NULL map) - */ - (void) __dm_resume(md, NULL); - + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + r = __dm_resume(md, map); + if (r) { + /* + * If a preresume method of some target failed, we are in a + * tricky situation. We can't return an error to the caller. We + * can't fake success because then the "resume" and + * "postsuspend" methods would not be paired correctly, and it + * would break various targets, for example it would cause list + * corruption in the "origin" target. + * + * So, we fake normal suspend here, to make sure that the + * "resume" and "postsuspend" methods will be paired correctly. 
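+ *
+ * Internal suspends nest by way of md->internal_suspend_count: only
+ * the first suspend and the last resume do real work. A minimal
+ * sketch of the counting (hypothetical names, locking elided):
+ *
+ *	static unsigned int count;	// guarded by md->suspend_lock
+ *
+ *	void internal_suspend(void)
+ *	{
+ *		if (count++ == 0)	// 0 -> 1: really suspend
+ *			do_suspend();
+ *	}
+ *
+ *	void internal_resume(void)
+ *	{
+ *		if (--count == 0)	// 1 -> 0: really resume
+ *			do_resume();
+ *	}
+ *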
+ */ + DMERR("Preresume method failed: %d", r); + set_bit(DMF_SUSPENDED, &md->flags); + } done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); @@ -2820,23 +3248,35 @@ done: } EXPORT_SYMBOL_GPL(dm_internal_resume_fast); -/*----------------------------------------------------------------- +/* + *--------------------------------------------------------------- * Event notification. - *---------------------------------------------------------------*/ + *--------------------------------------------------------------- + */ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, - unsigned cookie) + unsigned int cookie, bool need_resize_uevent) { + int r; + unsigned int noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; - char *envp[] = { udev_cookie, NULL }; - - if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); - else { + char *envp[3] = { NULL, NULL, NULL }; + char **envpp = envp; + if (cookie) { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + *envpp++ = udev_cookie; + } + if (need_resize_uevent) { + *envpp++ = "RESIZE=1"; } + + noio_flag = memalloc_noio_save(); + + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) @@ -2902,6 +3342,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -2914,85 +3359,100 @@ int dm_test_deferred_remove_flag(struct mapped_device *md) int dm_suspended(struct dm_target *ti) { - return dm_suspended_md(dm_table_get_md(ti->table)); + return dm_suspended_md(ti->table->md); } EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(ti->table->md); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { - return __noflush_suspending(dm_table_get_md(ti->table)); + return __noflush_suspending(ti->table->md); } EXPORT_SYMBOL_GPL(dm_noflush_suspending); -struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, - unsigned integrity, unsigned per_io_data_size, - unsigned min_pool_size) +void dm_free_md_mempools(struct dm_md_mempools *pools) { - struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); - unsigned int pool_size = 0; - unsigned int front_pad, io_front_pad; - int ret; - if (!pools) - return NULL; + return; - switch (type) { - case DM_TYPE_BIO_BASED: - case DM_TYPE_DAX_BIO_BASED: - case DM_TYPE_NVME_BIO_BASED: - pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); - front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); - io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); - ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); - if (ret) - goto out; - if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) - goto out; - break; - case DM_TYPE_REQUEST_BASED: - pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size); - front_pad = offsetof(struct dm_rq_clone_bio_info, clone); - /* per_io_data_size 
is used for blk-mq pdu at queue allocation */ - break; - default: - BUG(); - } + bioset_exit(&pools->bs); + bioset_exit(&pools->io_bs); - ret = bioset_init(&pools->bs, pool_size, front_pad, 0); - if (ret) - goto out; + kfree(pools); +} - if (integrity && bioset_integrity_create(&pools->bs, pool_size)) - goto out; +struct dm_blkdev_id { + u8 *id; + enum blk_unique_id type; +}; - return pools; +static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_blkdev_id *dm_id = data; + const struct block_device_operations *fops = dev->bdev->bd_disk->fops; -out: - dm_free_md_mempools(pools); + if (!fops->get_unique_id) + return 0; - return NULL; + return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type); } -void dm_free_md_mempools(struct dm_md_mempools *pools) +/* + * Allow access to get_unique_id() for the first device returning a + * non-zero result. Reasonable use expects all devices to have the + * same unique id. + */ +static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id, + enum blk_unique_id type) { - if (!pools) - return; + struct mapped_device *md = disk->private_data; + struct dm_table *table; + struct dm_target *ti; + int ret = 0, srcu_idx; - bioset_exit(&pools->bs); - bioset_exit(&pools->io_bs); + struct dm_blkdev_id dm_id = { + .id = id, + .type = type, + }; - kfree(pools); + table = dm_get_live_table(md, &srcu_idx); + if (!table || !dm_table_get_size(table)) + goto out; + + /* We only support devices that have a single target */ + if (table->num_targets != 1) + goto out; + ti = dm_table_get_target(table, 0); + + if (!ti->type->iterate_devices) + goto out; + + ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id); +out: + dm_put_live_table(md, srcu_idx); + return ret; } struct dm_pr { u64 old_key; u64 new_key; u32 flags; + bool abort; bool fail_early; + int ret; + enum pr_type type; + struct pr_keys *read_keys; + struct pr_held_reservation *rsv; }; static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, - void *data) + struct dm_pr *pr) { struct mapped_device *md = bdev->bd_disk->private_data; struct dm_table *table; @@ -3004,15 +3464,21 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, goto out; /* We only support devices that have a single target */ - if (dm_table_get_num_targets(table) != 1) + if (table->num_targets != 1) goto out; ti = dm_table_get_target(table, 0); + if (dm_suspended_md(md)) { + ret = -EAGAIN; + goto out; + } + ret = -EINVAL; if (!ti->type->iterate_devices) goto out; - ret = ti->type->iterate_devices(ti, fn, data); + ti->type->iterate_devices(ti, fn, pr); + ret = 0; out: dm_put_live_table(md, srcu_idx); return ret; @@ -3026,10 +3492,24 @@ static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev, { struct dm_pr *pr = data; const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + int ret; + + if (!ops || !ops->pr_register) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); + if (!ret) + return 0; - if (!ops || !ops->pr_register) - return -EOPNOTSUPP; - return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); + if (!pr->ret) + pr->ret = ret; + + if (pr->fail_early) + return -1; + + return 0; } static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, @@ -3040,82 +3520,145 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, .new_key = new_key, .flags = flags, 
.fail_early = true, + .ret = 0, }; int ret; ret = dm_call_pr(bdev, __dm_pr_register, &pr); - if (ret && new_key) { - /* unregister all paths if we failed to register any path */ - pr.old_key = new_key; - pr.new_key = 0; - pr.flags = 0; - pr.fail_early = false; - dm_call_pr(bdev, __dm_pr_register, &pr); + if (ret) { + /* Didn't even get to register a path */ + return ret; } + if (!pr.ret) + return 0; + ret = pr.ret; + + if (!new_key) + return ret; + + /* unregister all paths if we failed to register any path */ + pr.old_key = new_key; + pr.new_key = 0; + pr.flags = 0; + pr.fail_early = false; + (void) dm_call_pr(bdev, __dm_pr_register, &pr); return ret; } + +static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_reserve) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags); + if (!pr->ret) + return -1; + + return 0; +} + static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, u32 flags) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .old_key = key, + .flags = flags, + .type = type, + .fail_early = false, + .ret = 0, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_reserve, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_reserve) - r = ops->pr_reserve(bdev, key, type, flags); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; +} + +/* + * If there is a non-All Registrants type of reservation, the release must be + * sent down the holding path. For the cases where there is no reservation or + * the path is not the holder the device will also return success, so we must + * try each path to make sure we got the correct path. 
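+ *
+ * Like the other handlers here, __dm_pr_release() follows the
+ * dm_call_pr() convention: the per-device callback stores the
+ * operation's status in pr->ret and returns -1 to end the walk early,
+ * 0 to try the next path. A minimal sketch (hypothetical names):
+ *
+ *	struct ctx { int ret; };
+ *
+ *	static int release_one(struct blockdev *dev, void *data)
+ *	{
+ *		struct ctx *c = data;
+ *
+ *		c->ret = do_release(dev);
+ *		return c->ret ? -1 : 0;	// keep going until a path fails
+ *	}
+ *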
+ */ +static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_release) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type); + if (pr->ret) + return -1; + + return 0; } static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .old_key = key, + .type = type, + .fail_early = false, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_release, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_release) - r = ops->pr_release(bdev, key, type); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; +} + +static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_preempt) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type, + pr->abort); + if (!pr->ret) + return -1; + + return 0; } static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { - struct mapped_device *md = bdev->bd_disk->private_data; - const struct pr_ops *ops; - int r, srcu_idx; + struct dm_pr pr = { + .new_key = new_key, + .old_key = old_key, + .type = type, + .fail_early = false, + }; + int ret; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) - goto out; + ret = dm_call_pr(bdev, __dm_pr_preempt, &pr); + if (ret) + return ret; - ops = bdev->bd_disk->fops->pr_ops; - if (ops && ops->pr_preempt) - r = ops->pr_preempt(bdev, old_key, new_key, type, abort); - else - r = -EOPNOTSUPP; -out: - dm_unprepare_ioctl(md, srcu_idx); - return r; + return pr.ret; } static int dm_pr_clear(struct block_device *bdev, u64 key) @@ -3123,10 +3666,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; int r, srcu_idx; + bool forward = true; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); + /* Not a real ioctl, but targets must not interpret non-DM ioctls */ + r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward); if (r < 0) goto out; + WARN_ON_ONCE(!forward); ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_clear) @@ -3138,28 +3684,108 @@ out: return r; } +static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_read_keys) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); + if (!pr->ret) + return -1; + + return 0; +} + +static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) +{ + struct dm_pr pr = { + .read_keys = keys, + }; + int ret; + + ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr); + if (ret) + return ret; + + return pr.ret; +} + +static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const 
struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_read_reservation) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); + if (!pr->ret) + return -1; + + return 0; +} + +static int dm_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *rsv) +{ + struct dm_pr pr = { + .rsv = rsv, + }; + int ret; + + ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr); + if (ret) + return ret; + + return pr.ret; +} + static const struct pr_ops dm_pr_ops = { .pr_register = dm_pr_register, .pr_reserve = dm_pr_reserve, .pr_release = dm_pr_release, .pr_preempt = dm_pr_preempt, .pr_clear = dm_pr_clear, + .pr_read_keys = dm_pr_read_keys, + .pr_read_reservation = dm_pr_read_reservation, }; static const struct block_device_operations dm_blk_dops = { + .submit_bio = dm_submit_bio, + .poll_bio = dm_poll_bio, .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, .report_zones = dm_blk_report_zones, + .get_unique_id = dm_blk_get_unique_id, + .pr_ops = &dm_pr_ops, + .owner = THIS_MODULE +}; + +static const struct block_device_operations dm_rq_blk_dops = { + .open = dm_blk_open, + .release = dm_blk_close, + .ioctl = dm_blk_ioctl, + .getgeo = dm_blk_getgeo, + .get_unique_id = dm_blk_get_unique_id, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, - .copy_from_iter = dm_dax_copy_from_iter, - .copy_to_iter = dm_dax_copy_to_iter, + .zero_page_range = dm_dax_zero_page_range, + .recovery_write = dm_dax_recovery_write, }; /* @@ -3171,12 +3797,15 @@ module_exit(dm_exit); module_param(major, uint, 0); MODULE_PARM_DESC(major, "The major number of the device mapper"); -module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); +module_param(reserved_bio_based_ios, uint, 0644); MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); -module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); +module_param(dm_numa_node, int, 0644); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); +module_param(swap_bios, int, 0644); +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); + MODULE_DESCRIPTION(DM_NAME " driver"); -MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); +MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>"); MODULE_LICENSE("GPL"); |