Diffstat (limited to 'drivers/md/dm-integrity.c')
-rw-r--r--  drivers/md/dm-integrity.c  745
1 file changed, 629 insertions, 116 deletions
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1fc901df84eb..c45464b6576a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -44,6 +44,7 @@
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
#define DISCARD_FILLER 0xf6
#define SALT_SIZE 16
+#define RECHECK_POOL_SIZE 256
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -62,6 +63,7 @@
#define SB_VERSION_3 3
#define SB_VERSION_4 4
#define SB_VERSION_5 5
+#define SB_VERSION_6 6
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -86,6 +88,7 @@ struct superblock {
#define SB_FLAG_DIRTY_BITMAP 0x4
#define SB_FLAG_FIXED_PADDING 0x8
#define SB_FLAG_FIXED_HMAC 0x10
+#define SB_FLAG_INLINE 0x20
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -166,6 +169,7 @@ struct dm_integrity_c {
struct dm_dev *meta_dev;
unsigned int tag_size;
__s8 log2_tag_size;
+ unsigned int tuple_size;
sector_t start;
mempool_t journal_io_mempool;
struct dm_io_client *io;
@@ -279,6 +283,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
mempool_t recheck_pool;
+ struct bio_set recheck_bios;
+ struct bio_set recalc_bios;
struct notifier_block reboot_notifier;
};
@@ -314,6 +320,11 @@ struct dm_integrity_io {
struct completion *completion;
struct dm_bio_details bio_details;
+
+ char *integrity_payload;
+ unsigned payload_len;
+ bool integrity_payload_from_mempool;
+ bool integrity_range_locked;
};
struct journal_completion {
@@ -350,26 +361,8 @@ static struct kmem_cache *journal_io_cache;
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif
-static void dm_integrity_prepare(struct request *rq)
-{
-}
-
-static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
-{
-}
-
-/*
- * DM Integrity profile, protection is performed layer above (dm-crypt)
- */
-static const struct blk_integrity_profile dm_integrity_profile = {
- .name = "DM-DIF-EXT-TAG",
- .generate_fn = NULL,
- .verify_fn = NULL,
- .prepare_fn = dm_integrity_prepare,
- .complete_fn = dm_integrity_complete,
-};
-
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
@@ -479,7 +472,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE))
+ ic->sb->version = SB_VERSION_6;
+ else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
ic->sb->version = SB_VERSION_5;
else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
ic->sb->version = SB_VERSION_4;
@@ -499,7 +494,8 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
__u8 *sb = (__u8 *)ic->sb;
__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;
- if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
+ if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT ||
+ mac_size > HASH_MAX_DIGESTSIZE) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
return -EINVAL;
}
@@ -555,7 +551,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
}
}
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
return r;
@@ -1073,7 +1069,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
io_loc.sector = ic->start + SB_SECTORS + sector;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
"reading journal" : "writing journal", r);
@@ -1190,7 +1186,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
io_loc.sector = target;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
fn(-1UL, data);
@@ -1508,18 +1504,18 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
- fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
- fr.io_req.mem.type = DM_IO_KMEM,
- fr.io_req.mem.ptr.addr = NULL,
- fr.io_req.notify.fn = flush_notify,
+ fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
fr.io_req.notify.context = &fr;
- fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
- fr.io_reg.bdev = ic->dev->bdev,
- fr.io_reg.sector = 0,
- fr.io_reg.count = 0,
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
fr.ic = ic;
init_completion(&fr.comp);
- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
BUG_ON(r);
}
@@ -1699,7 +1695,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
struct page *page;
- void *buffer;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
@@ -1708,13 +1703,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
logical_sector = dio->range.logical_sector;
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
- buffer = page_to_virt(page);
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
+ sector_t alignment;
char *mem;
+ char *buffer = page_to_virt(page);
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1727,7 +1723,15 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ /* Align the bio to logical block size */
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
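+ /*
+  * Keep only the lowest set bit: the largest power-of-two sector count
+  * that divides the bio start, the bio size and the page size.
+  */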
+ alignment &= -alignment;
+ io_loc.sector = round_down(io_loc.sector, alignment);
+ io_loc.count += sector - io_loc.sector;
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ io_loc.count = round_up(io_loc.count, alignment);
+
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
goto free_ret;
@@ -1776,7 +1780,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned int sectors_to_process;
@@ -1848,12 +1852,12 @@ again:
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
if (r > 0) {
- integrity_recheck(dio, checksums);
+ integrity_recheck(dio, checksums_onstack);
goto skip_io;
}
- if (likely(checksums != checksums_onstack))
- kfree(checksums);
goto error;
}
@@ -1905,6 +1909,35 @@ error:
dec_in_flight(dio);
}
+static inline bool dm_integrity_check_limits(struct dm_integrity_c *ic, sector_t logical_sector, struct bio *bio)
+{
+ if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
+ DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
+ logical_sector, bio_sectors(bio),
+ ic->provided_data_sectors);
+ return false;
+ }
+ if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
+ DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
+ ic->sectors_per_block,
+ logical_sector, bio_sectors(bio));
+ return false;
+ }
+ if (ic->sectors_per_block > 1 && likely(bio_op(bio) != REQ_OP_DISCARD)) {
+ struct bvec_iter iter;
+ struct bio_vec bv;
+
+ bio_for_each_segment(bv, bio, iter) {
+ if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+ DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
+ bv.bv_offset, bv.bv_len, ic->sectors_per_block);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_integrity_c *ic = ti->private;
@@ -1917,6 +1950,14 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->bi_status = 0;
dio->op = bio_op(bio);
+ if (ic->mode == 'I') {
+ bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
+ dio->integrity_payload = NULL;
+ dio->integrity_payload_from_mempool = false;
+ dio->integrity_range_locked = false;
+ return dm_integrity_map_inline(dio, true);
+ }
+
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
@@ -1946,31 +1987,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
*/
bio->bi_opf &= ~REQ_FUA;
}
- if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
- DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
- dio->range.logical_sector, bio_sectors(bio),
- ic->provided_data_sectors);
+ if (unlikely(!dm_integrity_check_limits(ic, dio->range.logical_sector, bio)))
return DM_MAPIO_KILL;
- }
- if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
- DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
- ic->sectors_per_block,
- dio->range.logical_sector, bio_sectors(bio));
- return DM_MAPIO_KILL;
- }
-
- if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
- struct bvec_iter iter;
- struct bio_vec bv;
-
- bio_for_each_segment(bv, bio, iter) {
- if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
- DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
- bv.bv_offset, bv.bv_len, ic->sectors_per_block);
- return DM_MAPIO_KILL;
- }
- }
- }
bip = bio_integrity(bio);
if (!ic->internal_hash) {
@@ -2055,7 +2073,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -2165,6 +2183,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
unsigned int journal_section, journal_entry;
unsigned int journal_read_pos;
+ sector_t recalc_sector;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
@@ -2305,6 +2324,7 @@ offload_to_thread:
goto lock_retry;
}
}
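+ /*
+  * Sample recalc_sector while endio_wait.lock is still held; it is only
+  * compared against after the lock has been dropped.
+  */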
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -2359,7 +2379,7 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
goto skip_check;
if (ic->mode == 'B') {
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
@@ -2386,12 +2406,278 @@ journal_read_write:
do_endio_flush(ic, dio);
}
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map)
+{
+ struct dm_integrity_c *ic = dio->ic;
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct bio_integrity_payload *bip;
+ unsigned ret;
+ sector_t recalc_sector;
+
+ if (unlikely(bio_integrity(bio))) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ bio_set_dev(bio, ic->dev->bdev);
+ if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0))
+ return DM_MAPIO_REMAPPED;
+
+retry:
+ if (!dio->integrity_payload) {
+ unsigned digest_size, extra_size;
+ dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
+ digest_size = crypto_shash_digestsize(ic->internal_hash);
+ extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
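+ /*
+  * Reserve room for a digest that is longer than the tag, so that
+  * integrity_sector_checksum() never writes past the end of the payload.
+  */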
+ dio->payload_len += extra_size;
+ dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (unlikely(!dio->integrity_payload)) {
+ const unsigned x_size = PAGE_SIZE << 1;
+ if (dio->payload_len > x_size) {
+ unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block;
+ if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+ dm_accept_partial_bio(bio, sectors);
+ goto retry;
+ }
+ }
+ }
+
+ dio->range.logical_sector = bio->bi_iter.bi_sector;
+ dio->range.n_sectors = bio_sectors(bio);
+
+ if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)))
+ goto skip_spinlock;
+#ifdef CONFIG_64BIT
+ /*
+ * On 64-bit CPUs we can optimize the lock away (so that it won't cause
+ * cache line bouncing) and use acquire/release barriers instead.
+ *
+ * Paired with smp_store_release in integrity_recalc_inline.
+ */
+ recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector));
+ if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
+ goto skip_spinlock;
+#endif
+ spin_lock_irq(&ic->endio_wait.lock);
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
+ goto skip_unlock;
+ if (unlikely(!add_new_range(ic, &dio->range, true))) {
+ if (from_map) {
+ spin_unlock_irq(&ic->endio_wait.lock);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ return DM_MAPIO_SUBMITTED;
+ }
+ wait_and_add_new_range(ic, &dio->range);
+ }
+ dio->integrity_range_locked = true;
+skip_unlock:
+ spin_unlock_irq(&ic->endio_wait.lock);
+skip_spinlock:
+
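+ /*
+  * The earlier kmalloc failed (and the bio was small enough not to be
+  * split), so fall back to the recheck mempool; in inline mode it hands
+  * out order-1 pages, matching the two-page limit checked above.
+  */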
+ if (unlikely(!dio->integrity_payload)) {
+ dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
+ dio->integrity_payload_from_mempool = true;
+ }
+
+ dio->bio_details.bi_iter = bio->bi_iter;
+
+ if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) {
+ return DM_MAPIO_KILL;
+ }
+
+ bio->bi_iter.bi_sector += ic->start + SB_SECTORS;
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ if (IS_ERR(bip)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ if (dio->op == REQ_OP_WRITE) {
+ unsigned pos = 0;
+ while (dio->bio_details.bi_iter.bi_size) {
+ struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ const char *mem = bvec_kmap_local(&bv);
+ if (ic->tag_size < ic->tuple_size)
+ memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
+ integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + pos);
+ kunmap_local(mem);
+ pos += ic->tuple_size;
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+ }
+
+ ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
+ dio->payload_len, offset_in_page(dio->integrity_payload));
+ if (unlikely(ret != dio->payload_len)) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static inline void dm_integrity_free_payload(struct dm_integrity_io *dio)
+{
+ struct dm_integrity_c *ic = dio->ic;
+ if (unlikely(dio->integrity_payload_from_mempool))
+ mempool_free(virt_to_page(dio->integrity_payload), &ic->recheck_pool);
+ else
+ kfree(dio->integrity_payload);
+ dio->integrity_payload = NULL;
+ dio->integrity_payload_from_mempool = false;
+}
+
+static void dm_integrity_inline_recheck(struct work_struct *w)
+{
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct dm_integrity_c *ic = dio->ic;
+ struct bio *outgoing_bio;
+ void *outgoing_data;
+
+ dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
+ dio->integrity_payload_from_mempool = true;
+
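+ /*
+  * The order-1 mempool allocation is split in two: the first page receives
+  * the on-disk tuple, the second page the re-read data block.
+  */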
+ outgoing_data = dio->integrity_payload + PAGE_SIZE;
+
+ while (dio->bio_details.bi_iter.bi_size) {
+ char digest[HASH_MAX_DIGESTSIZE];
+ int r;
+ struct bio_integrity_payload *bip;
+ struct bio_vec bv;
+ char *mem;
+
+ outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios);
+
+ r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT, 0);
+ if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) {
+ bio_put(outgoing_bio);
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return;
+ }
+
+ bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1);
+ if (IS_ERR(bip)) {
+ bio_put(outgoing_bio);
+ bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
+ bio_endio(bio);
+ return;
+ }
+
+ r = bio_integrity_add_page(outgoing_bio, virt_to_page(dio->integrity_payload), ic->tuple_size, 0);
+ if (unlikely(r != ic->tuple_size)) {
+ bio_put(outgoing_bio);
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return;
+ }
+
+ outgoing_bio->bi_iter.bi_sector = dio->bio_details.bi_iter.bi_sector + ic->start + SB_SECTORS;
+
+ r = submit_bio_wait(outgoing_bio);
+ if (unlikely(r != 0)) {
+ bio_put(outgoing_bio);
+ bio->bi_status = errno_to_blk_status(r);
+ bio_endio(bio);
+ return;
+ }
+ bio_put(outgoing_bio);
+
+ integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
+ if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
+ atomic64_inc(&ic->number_of_mismatches);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+ bio, dio->bio_details.bi_iter.bi_sector, 0);
+
+ bio->bi_status = BLK_STS_PROTECTION;
+ bio_endio(bio);
+ return;
+ }
+
+ bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ mem = bvec_kmap_local(&bv);
+ memcpy(mem, outgoing_data, ic->sectors_per_block << SECTOR_SHIFT);
+ kunmap_local(mem);
+
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+
+ bio_endio(bio);
+}
+
+static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
+{
+ struct dm_integrity_c *ic = ti->private;
+ if (ic->mode == 'I') {
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
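+ /*
+  * On a successful read, verify each data block against the tags returned
+  * in the inline integrity payload; on a mismatch, defer completion
+  * (DM_ENDIO_INCOMPLETE) and re-read the data from a workqueue in
+  * dm_integrity_inline_recheck().
+  */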
+ if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
+ unsigned pos = 0;
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ unlikely(dio->integrity_range_locked))
+ goto skip_check;
+ while (dio->bio_details.bi_iter.bi_size) {
+ char digest[HASH_MAX_DIGESTSIZE];
+ struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ char *mem = bvec_kmap_local(&bv);
+ //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
+ integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
+ if (unlikely(memcmp(digest, dio->integrity_payload + pos,
+ min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ kunmap_local(mem);
+ dm_integrity_free_payload(dio);
+ INIT_WORK(&dio->work, dm_integrity_inline_recheck);
+ queue_work(ic->offload_wq, &dio->work);
+ return DM_ENDIO_INCOMPLETE;
+ }
+ kunmap_local(mem);
+ pos += ic->tuple_size;
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+ }
+skip_check:
+ dm_integrity_free_payload(dio);
+ if (unlikely(dio->integrity_range_locked))
+ remove_range(ic, &dio->range);
+ }
+ return DM_ENDIO_DONE;
+}
static void integrity_bio_wait(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
- dm_integrity_map_continue(dio, false);
+ if (ic->mode == 'I') {
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ int r = dm_integrity_map_inline(dio, false);
+ switch (r) {
+ case DM_MAPIO_KILL:
+ bio->bi_status = BLK_STS_IOERR;
+ fallthrough;
+ case DM_MAPIO_REMAPPED:
+ submit_bio_noacct(bio);
+ fallthrough;
+ case DM_MAPIO_SUBMITTED:
+ return;
+ default:
+ BUG();
+ }
+ } else {
+ dm_integrity_map_continue(dio, false);
+ }
}
static void pad_uncommitted(struct dm_integrity_c *ic)
@@ -2424,6 +2710,9 @@ static void integrity_commit(struct work_struct *w)
del_timer(&ic->autocommit_timer);
+ if (ic->mode == 'I')
+ return;
+
spin_lock_irq(&ic->endio_wait.lock);
flushes = bio_list_get(&ic->flush_bio_list);
if (unlikely(ic->mode != 'J')) {
@@ -2618,7 +2907,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
@@ -2806,7 +3095,7 @@ next_chunk:
io_loc.sector = get_data_sector(ic, area, offset);
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, "reading data", r);
goto err;
@@ -2860,6 +3149,133 @@ free_ret:
kvfree(recalc_tags);
}
+static void integrity_recalc_inline(struct work_struct *w)
+{
+ struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
+ size_t recalc_tags_size;
+ u8 *recalc_buffer = NULL;
+ u8 *recalc_tags = NULL;
+ struct dm_integrity_range range;
+ struct bio *bio;
+ struct bio_integrity_payload *bip;
+ __u8 *t;
+ unsigned int i;
+ int r;
+ unsigned ret;
+ unsigned int super_counter = 0;
+ unsigned recalc_sectors = RECALC_SECTORS;
+
+retry:
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_buffer) {
+oom:
+ recalc_sectors >>= 1;
+ if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
+ goto retry;
+ DMCRIT("out of memory for recalculate buffer - recalculation disabled");
+ goto free_ret;
+ }
+
+ recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_tags) {
+ kfree(recalc_buffer);
+ recalc_buffer = NULL;
+ goto oom;
+ }
+
+ spin_lock_irq(&ic->endio_wait.lock);
+
+next_chunk:
+ if (unlikely(dm_post_suspending(ic->ti)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ goto unlock_ret;
+ range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
+
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
+ recalc_write_super(ic);
+ super_counter = 0;
+ }
+
+ if (unlikely(dm_integrity_failed(ic)))
+ goto err;
+
+ DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors);
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
+
+ t = recalc_tags;
+ for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ memset(t, 0, ic->tuple_size);
+ integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tuple_size;
+ }
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ if (unlikely(IS_ERR(bip))) {
+ bio_put(bio);
+ DMCRIT("out of memory for bio integrity payload - recalculation disabled");
+ goto err;
+ }
+ ret = bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags));
+ if (unlikely(ret != t - recalc_tags)) {
+ bio_put(bio);
+ dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM);
+ goto err;
+ }
+
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "writing data", r);
+ goto err;
+ }
+
+ cond_resched();
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+#ifdef CONFIG_64BIT
+ /* Paired with smp_load_acquire in dm_integrity_map_inline. */
+ smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors));
+#else
+ ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
+#endif
+ goto next_chunk;
+
+err:
+ remove_range(ic, &range);
+ goto free_ret;
+
+unlock_ret:
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ recalc_write_super(ic);
+
+free_ret:
+ kfree(recalc_buffer);
+ kfree(recalc_tags);
+}
+
static void bitmap_block_work(struct work_struct *w)
{
struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
@@ -3374,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE: {
- __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
-
- watermark_percentage += ic->journal_entries / 2;
- do_div(watermark_percentage, ic->journal_entries);
- arg_count = 3;
+ arg_count = 1; /* buffer_sectors */
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'B';
- arg_count += ic->mode == 'B';
+ arg_count += ic->mode != 'I'; /* interleave_sectors */
+ arg_count += ic->mode == 'J'; /* journal_sectors */
+ arg_count += ic->mode == 'J'; /* journal_watermark */
+ arg_count += ic->mode == 'J'; /* commit_time */
+ arg_count += ic->mode == 'B'; /* sectors_per_bit */
+ arg_count += ic->mode == 'B'; /* bitmap_flush_interval */
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -3406,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
- DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
- DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ if (ic->mode != 'I')
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
+ __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
+ watermark_percentage += ic->journal_entries / 2;
+ do_div(watermark_percentage, ic->journal_entries);
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
@@ -3482,9 +3901,22 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
if (ic->sectors_per_block > 1) {
limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
- blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
+ limits->io_min = ic->sectors_per_block << SECTOR_SHIFT;
limits->dma_alignment = limits->logical_block_size - 1;
+ limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
+ }
+
+ if (!ic->internal_hash) {
+ struct blk_integrity *bi = &limits->integrity;
+
+ memset(bi, 0, sizeof(*bi));
+ bi->tuple_size = ic->tag_size;
+ bi->tag_size = bi->tuple_size;
+ bi->interval_exp =
+ ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
}
+
+ limits->max_integrity_segments = USHRT_MAX;
}
static void calculate_journal_section_size(struct dm_integrity_c *ic)
@@ -3513,7 +3945,10 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
return -EINVAL;
ic->initial_sectors = initial_sectors;
- if (!ic->meta_dev) {
+ if (ic->mode == 'I') {
+ if (ic->initial_sectors + ic->provided_data_sectors > ic->meta_device_sectors)
+ return -EINVAL;
+ } else if (!ic->meta_dev) {
sector_t last_sector, last_area, last_offset;
/* we have to maintain excessive padding for compatibility with existing volumes */
@@ -3576,6 +4011,8 @@ static int initialize_superblock(struct dm_integrity_c *ic,
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
memcpy(ic->sb->magic, SB_MAGIC, 8);
+ if (ic->mode == 'I')
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_INLINE);
ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
if (ic->journal_mac_alg.alg_string)
@@ -3585,6 +4022,8 @@ static int initialize_superblock(struct dm_integrity_c *ic,
journal_sections = journal_sectors / ic->journal_section_sectors;
if (!journal_sections)
journal_sections = 1;
+ if (ic->mode == 'I')
+ journal_sections = 0;
if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
@@ -3640,21 +4079,6 @@ try_smaller_buffer:
return 0;
}
-static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
-{
- struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
- struct blk_integrity bi;
-
- memset(&bi, 0, sizeof(bi));
- bi.profile = &dm_integrity_profile;
- bi.tuple_size = ic->tag_size;
- bi.tag_size = bi.tuple_size;
- bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
-
- blk_integrity_register(disk, &bi);
- blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
-}
-
static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned int i;
@@ -4148,10 +4572,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
}
if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
- !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
+ !strcmp(argv[3], "D") || !strcmp(argv[3], "R") ||
+ !strcmp(argv[3], "I")) {
ic->mode = argv[3][0];
} else {
- ti->error = "Invalid mode (expecting J, B, D, R)";
+ ti->error = "Invalid mode (expecting J, B, D, R, I)";
r = -EINVAL;
goto bad;
}
@@ -4213,7 +4638,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
@@ -4297,6 +4722,53 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
else
ic->log2_tag_size = -1;
+ if (ic->mode == 'I') {
+ struct blk_integrity *bi;
+ if (ic->meta_dev) {
+ r = -EINVAL;
+ ti->error = "Metadata device not supported in inline mode";
+ goto bad;
+ }
+ if (!ic->internal_hash_alg.alg_string) {
+ r = -EINVAL;
+ ti->error = "Internal hash not set in inline mode";
+ goto bad;
+ }
+ if (ic->journal_crypt_alg.alg_string || ic->journal_mac_alg.alg_string) {
+ r = -EINVAL;
+ ti->error = "Journal crypt not supported in inline mode";
+ goto bad;
+ }
+ if (ic->discard) {
+ r = -EINVAL;
+ ti->error = "Discards not supported in inline mode";
+ goto bad;
+ }
+ bi = blk_get_integrity(ic->dev->bdev->bd_disk);
+ if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) {
+ r = -EINVAL;
+ ti->error = "Integrity profile not supported";
+ goto bad;
+ }
+ /*printk("tag_size: %u, tuple_size: %u\n", bi->tag_size, bi->tuple_size);*/
+ if (bi->tuple_size < ic->tag_size) {
+ r = -EINVAL;
+ ti->error = "The integrity profile is smaller than tag size";
+ goto bad;
+ }
+ if ((unsigned long)bi->tuple_size > PAGE_SIZE / 2) {
+ r = -EINVAL;
+ ti->error = "Too big tuple size";
+ goto bad;
+ }
+ ic->tuple_size = bi->tuple_size;
+ if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) {
+ r = -EINVAL;
+ ti->error = "Integrity profile sector size mismatch";
+ goto bad;
+ }
+ }
+
if (ic->mode == 'B' && !ic->internal_hash) {
r = -EINVAL;
ti->error = "Bitmap mode can be only used with internal hash";
@@ -4327,12 +4799,37 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
- r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+ r = mempool_init_page_pool(&ic->recheck_pool, 1, ic->mode == 'I' ? 1 : 0);
if (r) {
ti->error = "Cannot allocate mempool";
goto bad;
}
+ if (ic->mode == 'I') {
+ r = bioset_init(&ic->recheck_bios, RECHECK_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (r) {
+ ti->error = "Cannot allocate bio set";
+ goto bad;
+ }
+ r = bioset_integrity_create(&ic->recheck_bios, RECHECK_POOL_SIZE);
+ if (r) {
+ ti->error = "Cannot allocate bio integrity set";
+ r = -ENOMEM;
+ goto bad;
+ }
+ r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS);
+ if (r) {
+ ti->error = "Cannot allocate bio set";
+ goto bad;
+ }
+ r = bioset_integrity_create(&ic->recalc_bios, 1);
+ if (r) {
+ ti->error = "Cannot allocate bio integrity set";
+ r = -ENOMEM;
+ goto bad;
+ }
+ }
+
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
@@ -4409,11 +4906,16 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_6) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
}
+ if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_INLINE)) != (ic->mode == 'I')) {
+ r = -EINVAL;
+ ti->error = "Inline flag mismatch";
+ goto bad;
+ }
if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
r = -EINVAL;
ti->error = "Tag size doesn't match the information in superblock";
@@ -4424,10 +4926,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
- if (!le32_to_cpu(ic->sb->journal_sections)) {
- r = -EINVAL;
- ti->error = "Corrupted superblock, journal_sections is 0";
- goto bad;
+ if (ic->mode != 'I') {
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is 0";
+ goto bad;
+ }
+ } else {
+ if (le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is not 0";
+ goto bad;
+ }
}
/* make sure that ti->max_io_len doesn't overflow */
if (!ic->meta_dev) {
@@ -4478,8 +4988,9 @@ try_smaller_buffer:
bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
if (bits_in_journal > UINT_MAX)
bits_in_journal = UINT_MAX;
- while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
- log2_sectors_per_bitmap_bit++;
+ if (bits_in_journal)
+ while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
+ log2_sectors_per_bitmap_bit++;
log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
@@ -4499,7 +5010,6 @@ try_smaller_buffer:
goto bad;
}
-
threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
threshold += 50;
do_div(threshold, 100);
@@ -4534,7 +5044,7 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- INIT_WORK(&ic->recalc_work, integrity_recalc);
+ INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4561,7 +5071,7 @@ try_smaller_buffer:
}
dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
- if (ic->mode != 'R') {
+ if (ic->mode != 'R' && ic->mode != 'I') {
r = create_journal(ic, &ti->error);
if (r)
goto bad;
@@ -4621,7 +5131,7 @@ try_smaller_buffer:
ic->just_formatted = true;
}
- if (!ic->meta_dev) {
+ if (!ic->meta_dev && ic->mode != 'I') {
r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
if (r)
goto bad;
@@ -4640,14 +5150,14 @@ try_smaller_buffer:
}
}
- if (!ic->internal_hash)
- dm_integrity_set(ti, ic);
-
ti->num_flush_bios = 1;
ti->flush_supported = true;
if (ic->discard)
ti->num_discard_bios = 1;
+ if (ic->mode == 'I')
+ ti->mempool_needs_integrity = true;
+
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0;
@@ -4681,6 +5191,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ bioset_exit(&ic->recalc_bios);
+ bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
@@ -4734,12 +5246,13 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 11, 0},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
.dtr = dm_integrity_dtr,
.map = dm_integrity_map,
+ .end_io = dm_integrity_end_io,
.postsuspend = dm_integrity_postsuspend,
.resume = dm_integrity_resume,
.status = dm_integrity_status,