Diffstat (limited to 'drivers/md')
33 files changed, 351 insertions, 190 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index f45fb372e51b..b5ea378e66cb 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -610,6 +610,7 @@ config DM_INTEGRITY
 	select CRYPTO
 	select CRYPTO_SKCIPHER
 	select ASYNC_XOR
+	select DM_AUDIT if AUDIT
 	help
 	  This device-mapper target emulates a block device that has
 	  additional per-sector tags that can be used for storing
@@ -642,4 +643,13 @@ config DM_ZONED
 
 	  If unsure, say N.
 
+config DM_AUDIT
+	bool "DM audit events"
+	depends on AUDIT
+	help
+	  Generate audit events for device-mapper.
+
+	  Enables audit logging of several security relevant events in
+	  particular device-mapper targets, especially the integrity target.
+
 endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 816945eeed7f..0454b0885b01 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -107,3 +107,7 @@ endif
 ifeq ($(CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG),y)
 dm-verity-objs += dm-verity-verify-sig.o
 endif
+
+ifeq ($(CONFIG_DM_AUDIT),y)
+dm-mod-objs += dm-audit.o
+endif
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a7bb3355b776..86b9e355c583 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1002,7 +1002,7 @@ static void calc_cached_dev_sectors(struct cache_set *c)
 	struct cached_dev *dc;
 
 	list_for_each_entry(dc, &c->cached_devs, list)
-		sectors += bdev_sectors(dc->bdev);
+		sectors += bdev_nr_sectors(dc->bdev);
 
 	c->cached_dev_sectors = sectors;
 }
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index cdb165517d0b..6f3cb7c92130 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -559,8 +559,4 @@ static inline unsigned int fract_exp_two(unsigned int x,
 void bch_bio_map(struct bio *bio, void *base);
 int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
 
-static inline sector_t bdev_sectors(struct block_device *bdev)
-{
-	return bdev->bd_inode->i_size >> 9;
-}
 #endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8120da278161..c7560f66dca8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -45,7 +45,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
 	 * backing volume uses about 2% of the cache for dirty data.
 	 */
 	uint32_t bdev_share =
-		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+		div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
 				c->cached_dev_sectors);
 
 	uint64_t cache_dirty_target =
diff --git a/drivers/md/dm-audit.c b/drivers/md/dm-audit.c
new file mode 100644
index 000000000000..3049dfe67e50
--- /dev/null
+++ b/drivers/md/dm-audit.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Creating audit records for mapped devices.
+ *
+ * Copyright (C) 2021 Fraunhofer AISEC. All rights reserved.
+ *
+ * Authors: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
+ */
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "dm-audit.h"
+#include "dm-core.h"
+
+static struct audit_buffer *dm_audit_log_start(int audit_type,
+					       const char *dm_msg_prefix,
+					       const char *op)
+{
+	struct audit_buffer *ab;
+
+	if (audit_enabled == AUDIT_OFF)
+		return NULL;
+
+	ab = audit_log_start(audit_context(), GFP_KERNEL, audit_type);
+	if (unlikely(!ab))
+		return NULL;
+
+	audit_log_format(ab, "module=%s op=%s", dm_msg_prefix, op);
+	return ab;
+}
+
+void dm_audit_log_ti(int audit_type, const char *dm_msg_prefix, const char *op,
+		     struct dm_target *ti, int result)
+{
+	struct audit_buffer *ab = NULL;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+	int dev_major = dm_disk(md)->major;
+	int dev_minor = dm_disk(md)->first_minor;
+
+	switch (audit_type) {
+	case AUDIT_DM_CTRL:
+		ab = dm_audit_log_start(audit_type, dm_msg_prefix, op);
+		if (unlikely(!ab))
+			return;
+		audit_log_task_info(ab);
+		audit_log_format(ab, " dev=%d:%d error_msg='%s'", dev_major,
+				 dev_minor, !result ? ti->error : "success");
+		break;
+	case AUDIT_DM_EVENT:
+		ab = dm_audit_log_start(audit_type, dm_msg_prefix, op);
+		if (unlikely(!ab))
+			return;
+		audit_log_format(ab, " dev=%d:%d sector=?", dev_major,
+				 dev_minor);
+		break;
+	default: /* unintended use */
+		return;
+	}
+
+	audit_log_format(ab, " res=%d", result);
+	audit_log_end(ab);
+}
+EXPORT_SYMBOL_GPL(dm_audit_log_ti);
+
+void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
+		      struct bio *bio, sector_t sector, int result)
+{
+	struct audit_buffer *ab;
+	int dev_major = MAJOR(bio->bi_bdev->bd_dev);
+	int dev_minor = MINOR(bio->bi_bdev->bd_dev);
+
+	ab = dm_audit_log_start(AUDIT_DM_EVENT, dm_msg_prefix, op);
+	if (unlikely(!ab))
+		return;
+
+	audit_log_format(ab, " dev=%d:%d sector=%llu res=%d",
+			 dev_major, dev_minor, sector, result);
+	audit_log_end(ab);
+}
+EXPORT_SYMBOL_GPL(dm_audit_log_bio);
diff --git a/drivers/md/dm-audit.h b/drivers/md/dm-audit.h
new file mode 100644
index 000000000000..2385f2b659be
--- /dev/null
+++ b/drivers/md/dm-audit.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Creating audit records for mapped devices.
+ *
+ * Copyright (C) 2021 Fraunhofer AISEC. All rights reserved.
+ *
+ * Authors: Michael Weiß <michael.weiss@aisec.fraunhofer.de>
+ */
+
+#ifndef DM_AUDIT_H
+#define DM_AUDIT_H
+
+#include <linux/device-mapper.h>
+#include <linux/audit.h>
+
+#ifdef CONFIG_DM_AUDIT
+void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
+		      struct bio *bio, sector_t sector, int result);
+
+/*
+ * dm_audit_log_ti() is not intended to be used directly in dm modules,
+ * the wrapper functions below should be called by dm modules instead.
+ */
+void dm_audit_log_ti(int audit_type, const char *dm_msg_prefix, const char *op,
+		     struct dm_target *ti, int result);
+
+static inline void dm_audit_log_ctr(const char *dm_msg_prefix,
+				    struct dm_target *ti, int result)
+{
+	dm_audit_log_ti(AUDIT_DM_CTRL, dm_msg_prefix, "ctr", ti, result);
+}
+
+static inline void dm_audit_log_dtr(const char *dm_msg_prefix,
+				    struct dm_target *ti, int result)
+{
+	dm_audit_log_ti(AUDIT_DM_CTRL, dm_msg_prefix, "dtr", ti, result);
+}
+
+static inline void dm_audit_log_target(const char *dm_msg_prefix, const char *op,
+				       struct dm_target *ti, int result)
+{
+	dm_audit_log_ti(AUDIT_DM_EVENT, dm_msg_prefix, op, ti, result);
+}
+#else
+static inline void dm_audit_log_bio(const char *dm_msg_prefix, const char *op,
+				    struct bio *bio, sector_t sector,
+				    int result)
+{
+}
+static inline void dm_audit_log_target(const char *dm_msg_prefix,
+				       const char *op, struct dm_target *ti,
+				       int result)
+{
+}
+static inline void dm_audit_log_ctr(const char *dm_msg_prefix,
+				    struct dm_target *ti, int result)
+{
+}
+
+static inline void dm_audit_log_dtr(const char *dm_msg_prefix,
+				    struct dm_target *ti, int result)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 50f3e673729c..e9cbc70d5a0e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1525,7 +1525,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
 
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 {
-	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t s = bdev_nr_sectors(c->bdev);
 
 	if (s >= c->start)
 		s -= c->start;
 	else
@@ -2082,7 +2082,6 @@ static void __exit dm_bufio_exit(void)
 	int bug = 0;
 
 	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
-	flush_workqueue(dm_bufio_wq);
 	destroy_workqueue(dm_bufio_wq);
 
 	if (dm_bufio_client_count) {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 89a73204dbf4..2874f222c313 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -334,7 +334,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
 	int r;
 	struct dm_block *sblock;
 	struct cache_disk_superblock *disk_super;
-	sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
 
 	/* FIXME: see if we can lose the max sectors limit */
 	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bdd500447dea..447d030036d1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1940,7 +1940,7 @@ static void cache_dtr(struct dm_target *ti)
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(dev->bdev);
 }
 
 /*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index edd22e4d65df..4599632d7a84 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1514,7 +1514,7 @@ error:
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(dev->bdev);
 }
 
 /*---------------------------------------------------------------------------*/
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 55dccdfbcb22..b855fef4f38a 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,7 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/genhd.h>
 #include <linux/blk-mq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
 
 #include <trace/events/block.h>
 
@@ -200,7 +200,7 @@ struct dm_table {
 	struct dm_md_mempools *mempools;
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
-	struct blk_keyslot_manager *ksm;
+	struct blk_crypto_profile *crypto_profile;
 #endif
 };
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 292f7896f733..d4ae31558826 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -42,6 +42,8 @@
 
 #include <linux/device-mapper.h>
 
+#include "dm-audit.h"
+
 #define DM_MSG_PREFIX "crypt"
 
 /*
@@ -1363,8 +1365,12 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	if (r == -EBADMSG) {
 		char b[BDEVNAME_SIZE];
-		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
-			    (unsigned long long)le64_to_cpu(*sector));
+		sector_t s = le64_to_cpu(*sector);
+
+		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
+			    bio_devname(ctx->bio_in, b), s);
+		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+				 ctx->bio_in, s, 0);
 	}
 
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -2174,8 +2180,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error == -EBADMSG) {
 		char b[BDEVNAME_SIZE];
-		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
-			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
+		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
+
+		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu",
+			    bio_devname(ctx->bio_in, b), s);
+		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+				 ctx->bio_in, s, 0);
 		io->error = BLK_STS_PROTECTION;
 	} else if (error < 0)
 		io->error = BLK_STS_IOERR;
@@ -2735,6 +2745,8 @@ static void crypt_dtr(struct dm_target *ti)
 	dm_crypt_clients_n--;
 	crypt_calculate_pages_per_client();
 	spin_unlock(&dm_crypt_clients_lock);
+
+	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
 }
 
 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
@@ -3351,21 +3363,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	spin_lock_init(&cc->write_thread_lock);
 	cc->write_tree = RB_ROOT;
 
-	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
+	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
 	if (IS_ERR(cc->write_thread)) {
 		ret = PTR_ERR(cc->write_thread);
 		cc->write_thread = NULL;
 		ti->error = "Couldn't spawn write thread";
 		goto bad;
 	}
-	wake_up_process(cc->write_thread);
 
 	ti->num_flush_bios = 1;
 	ti->limit_swap_bios = true;
 
+	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
 	return 0;
 
 bad:
+	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
 	crypt_dtr(ti);
 	return ret;
 }
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 3163e2b1418e..03672204b0e3 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -415,7 +415,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
 			char *result, unsigned int maxlen)
 {
 	struct dust_device *dd = ti->private;
-	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t size = bdev_nr_sectors(dd->dev->bdev);
 	bool invalid_msg = false;
 	int r = -EINVAL;
 	unsigned long long tmp, block;
@@ -544,8 +544,7 @@ static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (dd->start ||
-	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+	if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
 		return 1;
 
 	return 0;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index d25989660a76..7ce5d509b940 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -416,7 +416,7 @@ static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
 	*bdev = dev->bdev;
-	return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+	return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
 }
 
 static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2a78f6874143..1f6bf152b3c7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1681,7 +1681,7 @@ static int era_message(struct dm_target *ti, unsigned argc, char **argv,
 
 static sector_t get_dev_size(struct dm_dev *dev)
 {
-	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(dev->bdev);
 }
 
 static int era_iterate_devices(struct dm_target *ti,
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 3f4139ac1f60..b5f20eba3641 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,7 +168,7 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
 */
 static inline sector_t get_dev_size(struct block_device *bdev)
 {
-	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(bdev);
 }
 
 static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 4b94ffe6f2d4..345229d7e59c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -456,8 +456,7 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (fc->start ||
-	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+	if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
 		return 1;
 	return 0;
 }
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dc03b70f6e65..6319deccbe09 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -23,6 +23,8 @@
 #include <linux/async_tx.h>
 #include <linux/dm-bufio.h>
 
+#include "dm-audit.h"
+
 #define DM_MSG_PREFIX "integrity"
 
 #define DEFAULT_INTERLEAVE_SECTORS 32768
@@ -539,6 +541,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
 		}
 		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
 			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
+			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
 			return -EILSEQ;
 		}
 	}
@@ -876,8 +879,10 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
 		if (likely(wr))
 			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
 		else {
-			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
+			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
 				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
+				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
+			}
 		}
 	}
 }
@@ -1765,7 +1770,7 @@ static void integrity_metadata(struct work_struct *w)
 			char *mem, *checksums_ptr;
 
 again:
-			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
+			mem = bvec_kmap_local(&bv);
 			pos = 0;
 			checksums_ptr = checksums;
 			do {
@@ -1775,17 +1780,22 @@ again:
 				pos += ic->sectors_per_block << SECTOR_SHIFT;
 				sector += ic->sectors_per_block;
 			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
-			kunmap_atomic(mem);
+			kunmap_local(mem);
 
 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
 						checksums_ptr - checksums, dio->op == REQ_OP_READ ?
						TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					char b[BDEVNAME_SIZE];
-					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
-						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
+					sector_t s;
+
+					s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
+					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
+						    bio_devname(bio, b), s);
 					r = -EILSEQ;
 					atomic64_inc(&ic->number_of_mismatches);
+					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+							 bio, s, 0);
 				}
 				if (likely(checksums != checksums_onstack))
 					kfree(checksums);
@@ -1953,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
 			n_sectors -= bv.bv_len >> SECTOR_SHIFT;
 			bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
-			mem = kmap_atomic(bv.bv_page);
+			mem = bvec_kmap_local(&bv);
 			if (likely(dio->op == REQ_OP_WRITE))
 				flush_dcache_page(bv.bv_page);
 
@@ -1967,7 +1977,7 @@ retry_kmap:
 
 					if (unlikely(journal_entry_is_inprogress(je))) {
 						flush_dcache_page(bv.bv_page);
-						kunmap_atomic(mem);
+						kunmap_local(mem);
 
 						__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
 						goto retry_kmap;
@@ -1991,6 +2001,8 @@ retry_kmap:
 						if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
 							DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
 								    logical_sector);
+							dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
+									 bio, logical_sector, 0);
 						}
 					}
 #endif
@@ -2058,7 +2070,7 @@ retry_kmap:
 			if (unlikely(dio->op == REQ_OP_READ))
 				flush_dcache_page(bv.bv_page);
-			kunmap_atomic(mem);
+			kunmap_local(mem);
 		} while (n_sectors);
 
 		if (likely(dio->op == REQ_OP_WRITE)) {
@@ -2534,8 +2546,10 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
 					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
 								  (char *)access_journal_data(ic, i, l), test_tag);
-					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
+					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
 						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
+						dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
+					}
 				}
 
 				journal_entry_set_unused(je2);
@@ -4113,11 +4127,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		}
 	}
 
-	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+	ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
 	if (!ic->meta_dev)
 		ic->meta_device_sectors = ic->data_device_sectors;
 	else
-		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+		ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
 
 	if (!journal_sectors) {
 		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
@@ -4367,7 +4381,7 @@ try_smaller_buffer:
 	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
 	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
 	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
-	DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
+	DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
 	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
 	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
 	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
@@ -4514,9 +4528,11 @@ try_smaller_buffer:
 	if (ic->discard)
 		ti->num_discard_bios = 1;
 
+	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
 	return 0;
 
bad:
+	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
 	dm_integrity_dtr(ti);
 	return r;
 }
@@ -4590,6 +4606,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 	free_alg(&ic->journal_mac_alg);
 
 	kfree(ic);
+	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
 }
 
 static struct target_type integrity_target = {
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 679b4c0a2eea..66ba16713f69 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -135,8 +135,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (lc->start ||
-	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+	if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
 		return 1;
 	return 0;
 }
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d93a4db23512..0b3ef977ceeb 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -446,7 +446,7 @@ static int log_super(struct log_writes_c *lc)
 
 static inline sector_t logdev_last_sector(struct log_writes_c *lc)
 {
-	return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(lc->logdev->bdev);
 }
 
 static int log_writes_kthread(void *arg)
@@ -753,7 +753,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	 */
 	bio_for_each_segment(bv, bio, iter) {
 		struct page *page;
-		void *src, *dst;
+		void *dst;
 
 		page = alloc_page(GFP_NOIO);
 		if (!page) {
@@ -765,11 +765,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 			return DM_MAPIO_KILL;
 		}
 
-		src = kmap_atomic(bv.bv_page);
 		dst = kmap_atomic(page);
-		memcpy(dst, src + bv.bv_offset, bv.bv_len);
+		memcpy_from_bvec(dst, &bv);
 		kunmap_atomic(dst);
-		kunmap_atomic(src);
 		block->vecs[i].bv_page = page;
 		block->vecs[i].bv_len = bv.bv_len;
 		block->vec_cnt++;
@@ -851,7 +849,7 @@ static int log_writes_prepare_ioctl(struct dm_target *ti,
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+	if (ti->len != bdev_nr_sectors(dev->bdev))
 		return 1;
 	return 0;
 }
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 1ecf75ef276a..06f328928a7f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -447,7 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 				bdev_logical_block_size(lc->header_location.
 							bdev));
 
-		if (buf_size > i_size_read(dev->bdev->bd_inode)) {
+		if (buf_size > bdev_nr_bytes(dev->bdev)) {
 			DMWARN("log device %s too small: need %llu bytes",
 				dev->name, (unsigned long long)buf_size);
 			kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 694aaca4eea2..90dc9cc48881 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -530,7 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 
 	bdev = pgpath->path.dev->bdev;
 	q = bdev_get_queue(bdev);
-	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
 			BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(clone)) {
 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
@@ -579,7 +579,7 @@ static void multipath_release_clone(struct request *clone,
 					    clone->io_start_time_ns);
 	}
 
-	blk_put_request(clone);
+	blk_mq_free_request(clone);
 }
 
 /*
@@ -2061,7 +2061,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+	if (!r && ti->len != bdev_nr_sectors((*bdev)))
 		return 1;
 	return r;
 }
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index d9ef52159a22..2b26435a6946 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1261,7 +1261,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
 			md_rdev_init(jdev);
 			jdev->mddev = &rs->md;
 			jdev->bdev = rs->journal_dev.dev->bdev;
-			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+			jdev->sectors = bdev_nr_sectors(jdev->bdev);
 			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
 				rs->ti->error = "No space for raid4/5/6 journal";
 				return -ENOSPC;
@@ -1607,7 +1607,7 @@ static int _check_data_dev_sectors(struct raid_set *rs)
 
 	rdev_for_each(rdev, &rs->md)
 		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
-			ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+			ds = min(ds, bdev_nr_sectors(rdev->bdev));
 			if (ds < rs->md.dev_sectors) {
 				rs->ti->error = "Component device(s) too small";
 				return -EINVAL;
@@ -2662,7 +2662,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
 	 * Make sure we got a minimum amount of free sectors per device
 	 */
 	if (rs->data_offset &&
-	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
+	    bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
 		rs->ti->error = data_offset ? "No space for forward reshape" :
 					      "No space for backward reshape";
 		return -ENOSPC;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 028a92ff6d57..534dc2ca8bb0 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -529,7 +529,7 @@ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
 	if (ti->len + sctx->path_list[path_nr].start !=
-	    i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+	    bdev_nr_sectors((*bdev)))
 		return 1;
 	return 0;
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1fa4d5582dca..aa173f5bdc3d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -170,7 +170,7 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
 	}
 }
 
-static void dm_table_destroy_keyslot_manager(struct dm_table *t);
+static void dm_table_destroy_crypto_profile(struct dm_table *t);
 
 void dm_table_destroy(struct dm_table *t)
 {
@@ -200,7 +200,7 @@ void dm_table_destroy(struct dm_table *t)
 
 	dm_free_md_mempools(t->mempools);
 
-	dm_table_destroy_keyslot_manager(t);
+	dm_table_destroy_crypto_profile(t);
 
 	kfree(t);
 }
@@ -227,8 +227,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
-	sector_t dev_size =
-		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t dev_size = bdev_nr_sectors(bdev);
 	unsigned short logical_block_size_sectors =
 		limits->logical_block_size >> SECTOR_SHIFT;
 	char b[BDEVNAME_SIZE];
@@ -707,7 +706,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	r = dm_split_args(&argc, &argv, params);
 	if (r) {
-		tgt->error = "couldn't split parameters (insufficient memory)";
+		tgt->error = "couldn't split parameters";
 		goto bad;
 	}
 
@@ -725,7 +724,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return 0;
 
 bad:
-	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
+	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r));
 	dm_put_target_type(tgt->type);
 	return r;
 }
@@ -1187,8 +1186,8 @@ static int dm_table_register_integrity(struct dm_table *t)
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 
-struct dm_keyslot_manager {
-	struct blk_keyslot_manager ksm;
+struct dm_crypto_profile {
+	struct blk_crypto_profile profile;
 	struct mapped_device *md;
 };
 
@@ -1214,13 +1213,11 @@ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
 * When an inline encryption key is evicted from a device-mapper device, evict
 * it from all the underlying devices.
 */
-static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int dm_keyslot_evict(struct blk_crypto_profile *profile,
 			    const struct blk_crypto_key *key, unsigned int slot)
 {
-	struct dm_keyslot_manager *dksm = container_of(ksm,
-						       struct dm_keyslot_manager,
-						       ksm);
-	struct mapped_device *md = dksm->md;
+	struct mapped_device *md =
+		container_of(profile, struct dm_crypto_profile, profile)->md;
 	struct dm_keyslot_evict_args args = { key };
 	struct dm_table *t;
 	int srcu_idx;
@@ -1240,150 +1237,148 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
 	return args.err;
 }
 
-static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
-	.keyslot_evict = dm_keyslot_evict,
-};
-
-static int device_intersect_crypto_modes(struct dm_target *ti,
-					 struct dm_dev *dev, sector_t start,
-					 sector_t len, void *data)
+static int
+device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
+				     sector_t start, sector_t len, void *data)
 {
-	struct blk_keyslot_manager *parent = data;
-	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+	struct blk_crypto_profile *parent = data;
+	struct blk_crypto_profile *child =
+		bdev_get_queue(dev->bdev)->crypto_profile;
 
-	blk_ksm_intersect_modes(parent, child);
+	blk_crypto_intersect_capabilities(parent, child);
 	return 0;
 }
 
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
 {
-	struct dm_keyslot_manager *dksm = container_of(ksm,
-						       struct dm_keyslot_manager,
-						       ksm);
+	struct dm_crypto_profile *dmcp = container_of(profile,
						      struct dm_crypto_profile,
						      profile);
 
-	if (!ksm)
+	if (!profile)
 		return;
 
-	blk_ksm_destroy(ksm);
-	kfree(dksm);
+	blk_crypto_profile_destroy(profile);
+	kfree(dmcp);
 }
 
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
 {
-	dm_destroy_keyslot_manager(t->ksm);
-	t->ksm = NULL;
+	dm_destroy_crypto_profile(t->crypto_profile);
+	t->crypto_profile = NULL;
 }
 
 /*
- * Constructs and initializes t->ksm with a keyslot manager that
- * represents the common set of crypto capabilities of the devices
- * described by the dm_table. However, if the constructed keyslot
- * manager does not support a superset of the crypto capabilities
- * supported by the current keyslot manager of the mapped_device,
- * it returns an error instead, since we don't support restricting
- * crypto capabilities on table changes. Finally, if the constructed
- * keyslot manager doesn't actually support any crypto modes at all,
- * it just returns NULL.
+ * Constructs and initializes t->crypto_profile with a crypto profile that
+ * represents the common set of crypto capabilities of the devices described by
+ * the dm_table. However, if the constructed crypto profile doesn't support all
+ * crypto capabilities that are supported by the current mapped_device, it
+ * returns an error instead, since we don't support removing crypto capabilities
+ * on table changes. Finally, if the constructed crypto profile is "empty" (has
+ * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
 {
-	struct dm_keyslot_manager *dksm;
-	struct blk_keyslot_manager *ksm;
+	struct dm_crypto_profile *dmcp;
+	struct blk_crypto_profile *profile;
 	struct dm_target *ti;
 	unsigned int i;
-	bool ksm_is_empty = true;
+	bool empty_profile = true;
 
-	dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
-	if (!dksm)
+	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
+	if (!dmcp)
 		return -ENOMEM;
-	dksm->md = t->md;
+	dmcp->md = t->md;
 
-	ksm = &dksm->ksm;
-	blk_ksm_init_passthrough(ksm);
-	ksm->ksm_ll_ops = dm_ksm_ll_ops;
-	ksm->max_dun_bytes_supported = UINT_MAX;
-	memset(ksm->crypto_modes_supported, 0xFF,
-	       sizeof(ksm->crypto_modes_supported));
+	profile = &dmcp->profile;
+	blk_crypto_profile_init(profile, 0);
+	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
+	profile->max_dun_bytes_supported = UINT_MAX;
+	memset(profile->modes_supported, 0xFF,
+	       sizeof(profile->modes_supported));
 
 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 		ti = dm_table_get_target(t, i);
 
 		if (!dm_target_passes_crypto(ti->type)) {
-			blk_ksm_intersect_modes(ksm, NULL);
+			blk_crypto_intersect_capabilities(profile, NULL);
 			break;
 		}
 		if (!ti->type->iterate_devices)
 			continue;
-		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
-					  ksm);
+		ti->type->iterate_devices(ti,
+					  device_intersect_crypto_capabilities,
+					  profile);
 	}
 
-	if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
+	if (t->md->queue &&
+	    !blk_crypto_has_capabilities(profile,
+					 t->md->queue->crypto_profile)) {
 		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
-		dm_destroy_keyslot_manager(ksm);
+		dm_destroy_crypto_profile(profile);
 		return -EINVAL;
 	}
 
 	/*
-	 * If the new KSM doesn't actually support any crypto modes, we may as
-	 * well represent it with a NULL ksm.
+	 * If the new profile doesn't actually support any crypto capabilities,
+	 * we may as well represent it with a NULL profile.
 	 */
-	ksm_is_empty = true;
-	for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
-		if (ksm->crypto_modes_supported[i]) {
-			ksm_is_empty = false;
+	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
+		if (profile->modes_supported[i]) {
+			empty_profile = false;
 			break;
 		}
 	}
 
-	if (ksm_is_empty) {
-		dm_destroy_keyslot_manager(ksm);
-		ksm = NULL;
+	if (empty_profile) {
+		dm_destroy_crypto_profile(profile);
+		profile = NULL;
 	}
 
 	/*
-	 * t->ksm is only set temporarily while the table is being set
-	 * up, and it gets set to NULL after the capabilities have
-	 * been transferred to the request_queue.
+	 * t->crypto_profile is only set temporarily while the table is being
+	 * set up, and it gets set to NULL after the profile has been
+	 * transferred to the request_queue.
 	 */
-	t->ksm = ksm;
+	t->crypto_profile = profile;
 
 	return 0;
 }
 
-static void dm_update_keyslot_manager(struct request_queue *q,
-				      struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+				     struct dm_table *t)
 {
-	if (!t->ksm)
+	if (!t->crypto_profile)
 		return;
 
-	/* Make the ksm less restrictive */
-	if (!q->ksm) {
-		blk_ksm_register(t->ksm, q);
+	/* Make the crypto profile less restrictive. */
+	if (!q->crypto_profile) {
+		blk_crypto_register(t->crypto_profile, q);
 	} else {
-		blk_ksm_update_capabilities(q->ksm, t->ksm);
-		dm_destroy_keyslot_manager(t->ksm);
+		blk_crypto_update_capabilities(q->crypto_profile,
+					       t->crypto_profile);
+		dm_destroy_crypto_profile(t->crypto_profile);
 	}
-	t->ksm = NULL;
+	t->crypto_profile = NULL;
 }
 
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
 {
 	return 0;
 }
 
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
 {
 }
 
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
 {
 }
 
-static void dm_update_keyslot_manager(struct request_queue *q,
-				      struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+				     struct dm_table *t)
 {
 }
 
@@ -1415,9 +1410,9 @@ int dm_table_complete(struct dm_table *t)
 		return r;
 	}
 
-	r = dm_table_construct_keyslot_manager(t);
+	r = dm_table_construct_crypto_profile(t);
 	if (r) {
-		DMERR("could not construct keyslot manager.");
+		DMERR("could not construct crypto profile.");
 		return r;
 	}
 
@@ -2071,7 +2066,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			return r;
 	}
 
-	dm_update_keyslot_manager(q, t);
+	dm_update_crypto_profile(q, t);
 	disk_update_readahead(t->md->disk);
 
 	return 0;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c88ed14d49e6..1a96a07cbf44 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -549,7 +549,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 	int r;
 	struct dm_block *sblock;
 	struct thin_disk_superblock *disk_super;
-	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+	sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
 
 	if (bdev_size > THIN_METADATA_MAX_SECTORS)
 		bdev_size = THIN_METADATA_MAX_SECTORS;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4c67b77c23c1..ec119d2422d5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3212,7 +3212,7 @@ static int metadata_pre_commit_callback(void *context)
 
 static sector_t get_dev_size(struct block_device *bdev)
 {
-	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	return bdev_nr_sectors(bdev);
 }
 
 static void warn_if_metadata_device_too_big(struct block_device *bdev)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index aae48a8b1a04..80133aae0db3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -428,14 +428,14 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
 		unsigned len;
 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
 
-		page = kmap_atomic(bv.bv_page);
+		page = bvec_kmap_local(&bv);
 		len = bv.bv_len;
 
 		if (likely(len >= todo))
 			len = todo;
 
-		r = process(v, io, page + bv.bv_offset, len);
-		kunmap_atomic(page);
+		r = process(v, io, page, len);
+		kunmap_local(page);
 
 		if (r < 0)
 			return r;
@@ -834,8 +834,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
 
 	*bdev = v->data_dev->bdev;
 
-	if (v->data_start ||
-	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+	if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
 		return 1;
 	return 0;
 }
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 18320444fb0a..4b8991cde223 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -2264,14 +2264,13 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	raw_spin_lock_init(&wc->endio_list_lock);
 	INIT_LIST_HEAD(&wc->endio_list);
-	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
+	wc->endio_thread = kthread_run(writecache_endio_thread, wc, "writecache_endio");
 	if (IS_ERR(wc->endio_thread)) {
 		r = PTR_ERR(wc->endio_thread);
 		wc->endio_thread = NULL;
 		ti->error = "Couldn't spawn endio thread";
 		goto bad;
 	}
-	wake_up_process(wc->endio_thread);
 
 	/*
 	 * Parse the mode (pmem or ssd)
@@ -2341,7 +2340,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 			ti->error = "Cache data device lookup failed";
 			goto bad;
 		}
-		wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
+		wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
 
 		/*
 		 * Parse the cache block size
@@ -2493,14 +2492,13 @@ invalid_optional:
 		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
 
 		bio_list_init(&wc->flush_list);
-		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
+		wc->flush_thread = kthread_run(writecache_flush_thread, wc, "dm_writecache_flush");
 		if (IS_ERR(wc->flush_thread)) {
 			r = PTR_ERR(wc->flush_thread);
 			wc->flush_thread = NULL;
 			ti->error = "Couldn't spawn flush thread";
 			goto bad;
 		}
-		wake_up_process(wc->flush_thread);
 
 		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
 					  &n_blocks, &n_metadata_blocks);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ae1bc48c0043..166c4e9d99c9 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -733,7 +733,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
 	dev->dev_idx = idx;
 	(void)bdevname(dev->bdev, dev->name);
 
-	dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	dev->capacity = bdev_nr_sectors(bdev);
 	if (ti->begin) {
 		ti->error = "Partial mapping is not supported";
 		goto err;
@@ -967,7 +967,6 @@ static void dmz_dtr(struct dm_target *ti)
 	struct dmz_target *dmz = ti->private;
 	int i;
 
-	flush_workqueue(dmz->chunk_wq);
 	destroy_workqueue(dmz->chunk_wq);
 
 	for (i = 0; i < dmz->nr_ddevs; i++)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 79d4ac4aab05..662742a310cb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -29,7 +29,7 @@
 #include <linux/refcount.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -1663,14 +1663,14 @@ static const struct dax_operations dm_dax_ops;
 static void dm_wq_work(struct work_struct *work);
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
-static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static void dm_queue_destroy_crypto_profile(struct request_queue *q)
 {
-	dm_destroy_keyslot_manager(q->ksm);
+	dm_destroy_crypto_profile(q->crypto_profile);
 }
 
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
-static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
 {
 }
 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
@@ -1696,7 +1696,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		dm_sysfs_exit(md);
 		del_gendisk(md->disk);
 	}
-	dm_queue_destroy_keyslot_manager(md->queue);
+	dm_queue_destroy_crypto_profile(md->queue);
 	blk_cleanup_disk(md->disk);
 }
 
@@ -1792,7 +1792,7 @@ static struct mapped_device *alloc_dev(int minor)
 
 	format_dev_t(md->name, MKDEV(_major, minor));
 
-	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
+	md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
 	if (!md->wq)
 		goto bad;
 
@@ -1927,16 +1927,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	dm_table_event_callback(t, event_callback, md);
 
-	/*
-	 * The queue hasn't been stopped yet, if the old table type wasn't
-	 * for request-based during suspension. So stop it to prevent
-	 * I/O mapping before resume.
-	 * This must be done before setting the queue restrictions,
-	 * because request-based dm may be run just after the setting.
-	 */
-	if (request_based)
-		dm_stop_queue(q);
-
 	if (request_based) {
 		/*
 		 * Leverage the fact that request-based DM targets are
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e8666bdc0d28..5111ed966947 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -888,8 +888,7 @@ static struct md_personality *find_pers(int level, char *clevel)
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
 {
-	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
-	return MD_NEW_SIZE_SECTORS(num_sectors);
+	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
 }
 
 static int alloc_disk_sb(struct md_rdev *rdev)
@@ -1631,8 +1630,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
-		sb_start -= 8*2;
+		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
 		sb_start &= ~(sector_t)(4*2-1);
 		break;
 	case 1:
@@ -1787,10 +1785,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		else
 			ret = 0;
 	}
-	if (minor_version) {
-		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
-		sectors -= rdev->data_offset;
-	} else
+	if (minor_version)
+		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
+	else
 		sectors = rdev->sb_start;
 	if (sectors < le64_to_cpu(sb->data_size))
 		return -EINVAL;
@@ -2168,8 +2165,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 		return 0; /* too confusing */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
-		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
-		max_sectors -= rdev->data_offset;
+		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
 		if (!num_sectors || num_sectors > max_sectors)
 			num_sectors = max_sectors;
 	} else if (rdev->mddev->bitmap_info.offset) {
@@ -2178,7 +2174,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 	} else {
 		/* minor version 0; superblock after data */
 		sector_t sb_start, bm_space;
-		sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
 
 		/* 8K is for superblock */
 		sb_start = dev_size - 8*2;
@@ -3391,7 +3387,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
 		if (!sectors)
 			return -EBUSY;
 	} else if (!sectors)
-		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+		sectors = bdev_nr_sectors(rdev->bdev) -
 			rdev->data_offset;
 	if (!my_mddev->pers->resize)
 		/* Cannot change size for RAID0 or Linear etc */
@@ -3718,7 +3714,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
 	kobject_init(&rdev->kobj, &rdev_ktype);
 
-	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
+	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
 	if (!size) {
 		pr_warn("md: %s has zero or unknown size, marking faulty!\n",
 			bdevname(rdev->bdev,b));
@@ -6896,7 +6892,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 		if (!mddev->persistent) {
 			pr_debug("md: nonpersistent superblock ...\n");
-			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
 		} else
 			rdev->sb_start = calc_dev_sboffset(rdev);
 		rdev->sectors = rdev->sb_start;
@@ -6983,7 +6979,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	if (mddev->persistent)
 		rdev->sb_start = calc_dev_sboffset(rdev);
 	else
-		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
 	rdev->sectors = rdev->sb_start;
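
Note: the dm-audit calls in this patch follow a fixed pattern. A minimal sketch of
how a hypothetical target would wire in the new helpers ("example" is an assumed
target name, not part of this patch), based only on the API introduced in
dm-audit.h above:

	#include "dm-audit.h"

	#define DM_MSG_PREFIX "example"	/* module= field in the audit record */

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* ... target-specific setup; on failure set ti->error and goto bad ... */
		dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);	/* res=1: success */
		return 0;
	bad:
		dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);	/* res=0: record carries ti->error */
		return -EINVAL;
	}

	static void example_dtr(struct dm_target *ti)
	{
		/* ... teardown ... */
		dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
	}

Per-bio failures at a known sector (e.g. a checksum mismatch) are reported with
dm_audit_log_bio(DM_MSG_PREFIX, "<op>", bio, sector, 0), as dm-crypt and
dm-integrity do above; when no sector is available, dm_audit_log_target() emits
an AUDIT_DM_EVENT record with sector=? instead.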