Diffstat (limited to 'drivers/md/dm-core.h')
-rw-r--r-- | drivers/md/dm-core.h | 260
1 file changed, 234 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..a3c9f74fe2dc 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Internal header file _only_ for device mapper core
  *
@@ -12,12 +13,19 @@
 #include <linux/kthread.h>
 #include <linux/ktime.h>
 #include <linux/blk-mq.h>
+#include <linux/blk-crypto-profile.h>
+#include <linux/jump_label.h>
 
 #include <trace/events/block.h>
 
 #include "dm.h"
+#include "dm-ima.h"
 
 #define DM_RESERVED_MAX_IOS	1024
+#define DM_MAX_TARGETS		1048576
+#define DM_MAX_TARGET_PARAMS	1024
+
+struct dm_io;
 
 struct dm_kobject_holder {
 	struct kobject kobj;
@@ -25,9 +33,19 @@ struct dm_kobject_holder {
 };
 
 /*
- * DM core internal structure that used directly by dm.c and dm-rq.c
- * DM targets must _not_ dereference a mapped_device to directly access its members!
+ * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
+ * DM targets must _not_ dereference a mapped_device or dm_table to directly
+ * access their members!
+ */
+
+/*
+ * For mempools pre-allocation at the table loading time.
  */
+struct dm_md_mempools {
+	struct bio_set bs;
+	struct bio_set io_bs;
+};
+
 struct mapped_device {
 	struct mutex suspend_lock;
 
@@ -60,14 +78,32 @@ struct mapped_device {
 	struct gendisk *disk;
 	struct dax_device *dax_dev;
 
+	wait_queue_head_t wait;
+	unsigned long __percpu *pending_io;
+
+	/* forced geometry settings */
+	struct hd_geometry geometry;
+
+	/*
+	 * Processing queue (flush)
+	 */
+	struct workqueue_struct *wq;
+
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
 	struct work_struct work;
-	wait_queue_head_t wait;
 	spinlock_t deferred_lock;
 	struct bio_list deferred;
 
+	/*
+	 * requeue work context is needed for cloning one new bio
+	 * to represent the dm_io to be requeued, since each
+	 * dm_io may point to the original bio from FS.
+	 */
+	struct work_struct requeue_work;
+	struct dm_io *requeue_list;
+
 	void *interface_ptr;
 
 	/*
@@ -79,53 +115,225 @@ struct mapped_device {
 	struct list_head uevent_list;
 	spinlock_t uevent_lock; /* Protect access to uevent_list */
 
+	/* for blk-mq request-based DM support */
+	bool init_tio_pdu:1;
+	struct blk_mq_tag_set *tag_set;
+
+	struct dm_stats stats;
+
 	/* the number of internal suspends */
-	unsigned internal_suspend_count;
+	unsigned int internal_suspend_count;
+
+	int swap_bios;
+	struct semaphore swap_bios_semaphore;
+	struct mutex swap_bios_lock;
 
 	/*
 	 * io objects are allocated from here.
 	 */
-	struct bio_set io_bs;
-	struct bio_set bs;
+	struct dm_md_mempools *mempools;
 
-	/*
-	 * Processing queue (flush)
-	 */
-	struct workqueue_struct *wq;
+	/* kobject and completion */
+	struct dm_kobject_holder kobj_holder;
+
+	struct srcu_struct io_barrier;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+	unsigned int nr_zones;
+	void *zone_revalidate_map;
+	struct task_struct *revalidate_map_task;
+#endif
+
+#ifdef CONFIG_IMA
+	struct dm_ima_measurements ima;
+#endif
+};
+
+/*
+ * Bits for the flags field of struct mapped_device.
+ */
+#define DMF_BLOCK_IO_FOR_SUSPEND 0
+#define DMF_SUSPENDED 1
+#define DMF_FROZEN 2
+#define DMF_FREEING 3
+#define DMF_DELETING 4
+#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
+#define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
+
+static inline sector_t dm_get_size(struct mapped_device *md)
+{
+	return get_capacity(md->disk);
+}
+
+static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+	return &md->stats;
+}
+
+DECLARE_STATIC_KEY_FALSE(stats_enabled);
+DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
+DECLARE_STATIC_KEY_FALSE(zoned_enabled);
+
+static inline bool dm_emulate_zone_append(struct mapped_device *md)
+{
+	if (blk_queue_is_zoned(md->queue))
+		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+	return false;
+}
+
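[Editorial example, not part of the diff.] The DMF_* values above are bit indices into the flags word of struct mapped_device, not masks, so they are meant for the atomic bitops (test_bit/set_bit/clear_bit), exactly as dm_emulate_zone_append() uses them. A minimal sketch of that pattern; the two helper names below are hypothetical, though dm.c's real dm_suspended_md() follows the same test_bit() form:

/* DMF_* are bit numbers, so pair them with <linux/bitops.h> helpers
 * rather than OR-ing them together as masks.
 */
static inline void dm_mark_deleting(struct mapped_device *md)
{
	set_bit(DMF_DELETING, &md->flags);	/* atomic against other flag updates */
}

static inline bool dm_md_is_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}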
+#define DM_TABLE_MAX_DEPTH 16
+
+struct dm_table {
+	struct mapped_device *md;
+	enum dm_queue_mode type;
+
+	/* btree table */
+	unsigned int depth;
+	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
+	sector_t *index[DM_TABLE_MAX_DEPTH];
+
+	unsigned int num_targets;
+	unsigned int num_allocated;
+	sector_t *highs;
+	struct dm_target *targets;
+
+	struct target_type *immutable_target_type;
+
+	bool integrity_supported:1;
+	bool singleton:1;
+	/* set if all the targets in the table have "flush_bypasses_map" set */
+	bool flush_bypasses_map:1;
 
 	/*
-	 * freeze/thaw support require holding onto a super block
+	 * Indicates the rw permissions for the new logical device. This
+	 * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
 	 */
-	struct super_block *frozen_sb;
+	blk_mode_t mode;
 
-	/* forced geometry settings */
-	struct hd_geometry geometry;
+	/* a list of devices used by this table */
+	struct list_head devices;
+	struct rw_semaphore devices_lock;
 
-	/* kobject and completion */
-	struct dm_kobject_holder kobj_holder;
+	/* events get handed up using this callback */
+	void (*event_fn)(void *data);
+	void *event_context;
 
-	struct block_device *bdev;
+	struct dm_md_mempools *mempools;
 
-	struct dm_stats stats;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+	struct blk_crypto_profile *crypto_profile;
+#endif
+};
 
-	/* for blk-mq request-based DM support */
-	struct blk_mq_tag_set *tag_set;
-	bool init_tio_pdu:1;
+static inline struct dm_target *dm_table_get_target(struct dm_table *t,
+						    unsigned int index)
+{
+	BUG_ON(index >= t->num_targets);
+	return t->targets + index;
+}
 
-	struct srcu_struct io_barrier;
+/*
+ * One of these is allocated per clone bio.
+ */
+#define DM_TIO_MAGIC 28714
+struct dm_target_io {
+	unsigned short magic;
+	blk_short_t flags;
+	unsigned int target_bio_nr;
+	struct dm_io *io;
+	struct dm_target *ti;
+	unsigned int *len_ptr;
+	sector_t old_sector;
+	struct bio clone;
 };
+#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
+#define DM_IO_BIO_OFFSET \
+	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
+
+/*
+ * dm_target_io flags
+ */
+enum {
+	DM_TIO_INSIDE_DM_IO,
+	DM_TIO_IS_DUPLICATE_BIO
+};
+
+static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
+{
+	return (tio->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
+{
+	tio->flags |= (1U << bit);
+}
+
+static inline bool dm_tio_is_normal(struct dm_target_io *tio)
+{
+	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
+		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+}
+
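[Editorial example, not part of the diff.] Keeping clone as the final member of struct dm_target_io is what lets the core walk from a clone bio back to its enclosing tio with container_of(), and it is why DM_TARGET_IO_BIO_OFFSET and DM_IO_BIO_OFFSET can serve as front-pad offsets when the bio_sets are initialized. A sketch of that recovery step; dm.c carries an equivalent clone_to_tio() helper, while the tio_to_io() name here is illustrative:

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	/* The bio is embedded in the tio, so step back to the container. */
	return container_of(clone, struct dm_target_io, clone);
}

static inline struct dm_io *tio_to_io(struct dm_target_io *tio)
{
	/* For the first clone, the tio itself sits inside a dm_io (see tio->io). */
	return tio->io;
}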
+/*
+ * One of these is allocated per original bio.
+ * It contains the first clone used for that original.
+ */
+#define DM_IO_MAGIC 19577
+struct dm_io {
+	unsigned short magic;
+	blk_short_t flags;
+	spinlock_t lock;
+	unsigned long start_time;
+	void *data;
+	struct dm_io *next;
+	struct dm_stats_aux stats_aux;
+	blk_status_t status;
+	bool requeue_flush_with_data;
+	atomic_t io_count;
+	struct mapped_device *md;
+
+	/* The three fields represent mapped part of original bio */
+	struct bio *orig_bio;
+	unsigned int sector_offset; /* offset to end of orig_bio */
+	unsigned int sectors;
+
+	/* last member of dm_target_io is 'struct bio' */
+	struct dm_target_io tio;
+};
+
+/*
+ * dm_io flags
+ */
+enum {
+	DM_IO_ACCOUNTED,
+	DM_IO_WAS_SPLIT,
+	DM_IO_BLK_STAT
+};
+
+static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
+{
+	return (io->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
+{
+	io->flags |= (1U << bit);
+}
 
-void disable_write_same(struct mapped_device *md);
-void disable_write_zeroes(struct mapped_device *md);
+void dm_io_rewind(struct dm_io *io, struct bio_set *bs);
 
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
 }
 
-unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
+unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
 
-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
 {
 	return !maxlen || strlen(result) + 1 >= maxlen;
 }
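[Editorial example, not part of the diff.] dm_message_test_buffer_overflow() treats a result that runs to within one byte of maxlen as possibly truncated, so callers can retry with a larger buffer until the output clearly fits. A hedged sketch of that retry loop around a target's status hook; the function name and the doubling policy are illustrative (assuming <linux/slab.h> and <linux/device-mapper.h>), not code lifted from dm-ioctl.c:

/* Illustrative only: grow the buffer until the handler's output fits
 * with room to spare, i.e. no overflow is reported.
 */
static char *dm_format_target_status(struct dm_target *ti)
{
	unsigned int maxlen = 128;
	char *result;

	if (!ti->type->status)
		return NULL;

	for (;;) {
		result = kmalloc(maxlen, GFP_KERNEL);
		if (!result)
			return NULL;
		result[0] = '\0';
		ti->type->status(ti, STATUSTYPE_INFO, 0, result, maxlen);
		if (!dm_message_test_buffer_overflow(result, maxlen))
			return result;	/* output fit below maxlen - 1 */
		kfree(result);		/* possibly truncated: retry larger */
		maxlen *= 2;
	}
}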
