author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 13:21:31 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-22 13:21:31 -0800
commit    f0b2769a0185ccf84842a795b5587afc37274c3d (patch)
tree      144ff7280bd7699168ebf635b80625b17fd5194a /drivers/md
parent    23064dfe088e0926e3fc0922f118866dc1564405 (diff)
parent    d695e44157c8da8d298295d1905428fb2495bc8b (diff)
Merge tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fix DM cache target to free background tracker work items, otherwise
   a slab BUG will occur when kmem_cache_destroy() is called.

 - Improve two of DM's shrinker names to reflect their use.

 - Fix the DM flakey target to not corrupt the zero page. Fix dm-flakey
   on 32-bit highmem systems by using bvec_kmap_local instead of
   page_address. Also, fix the logic used when imposing the
   "corrupt_bio_byte" feature.

 - Stop using WQ_UNBOUND for the DM verity target's verify_wq because it
   causes significant Android latencies on ARM64 (and doesn't show real
   benefit on other architectures).

 - Add a negative check to catch the simple case of a DM table
   referencing itself. More complex scenarios that use intermediate
   devices to self-reference still need to be avoided/handled in
   userspace.

 - Fix DM core's resize to only send one uevent instead of two. This
   fixes a race with udev that, if udev wins, will cause udev to miss
   uevents (which caused premature unmount attempts by systemd).

 - Add cond_resched() to workqueue functions in DM core, dm-thin and
   dm-cache so that their loops aren't the cause of unintended CPU
   scheduling fairness issues.

 - Fix all of DM's checkpatch errors and warnings (famous last words).
   Various other small cleanups.

* tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (62 commits)
  dm: remove unnecessary (void*) conversion in event_callback()
  dm ioctl: remove unnecessary check when using dm_get_mdptr()
  dm ioctl: assert _hash_lock is held in __hash_remove
  dm cache: add cond_resched() to various workqueue loops
  dm thin: add cond_resched() to various workqueue loops
  dm: add cond_resched() to dm_wq_requeue_work()
  dm: add cond_resched() to dm_wq_work()
  dm sysfs: make kobj_type structure constant
  dm: update targets using system workqueues to use a local workqueue
  dm: remove flush_scheduled_work() during local_exit()
  dm clone: prefer kvmalloc_array()
  dm: declare variables static when sensible
  dm: fix suspect indent whitespace
  dm ioctl: prefer strscpy() instead of strlcpy()
  dm: avoid void function return statements
  dm integrity: change macros min/max() -> min_t/max_t where appropriate
  dm: fix use of sizeof() macro
  dm: avoid 'do {} while(0)' loop in single statement macros
  dm log: avoid multiple line dereference
  dm log: avoid trailing semicolon in macro
  ...
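For context, a minimal sketch (not code from this series) of the cond_resched()
pattern the pull message describes: a DM-style work function drains a deferred
bio list and yields once per iteration, so that a long backlog on a
non-preemptible kernel does not monopolize a CPU. The names example_ctx,
example_wq_work and pop_deferred_bio are hypothetical; the kernel APIs
(bio_list_pop, submit_bio_noacct, cond_resched) are real.

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>
    #include <linux/sched.h>
    #include <linux/bio.h>

    /* Hypothetical context holding a deferred bio list and its worker. */
    struct example_ctx {
            struct work_struct worker;
            struct bio_list deferred;
            spinlock_t lock;
    };

    static struct bio *pop_deferred_bio(struct example_ctx *ctx)
    {
            struct bio *bio;

            spin_lock_irq(&ctx->lock);
            bio = bio_list_pop(&ctx->deferred);
            spin_unlock_irq(&ctx->lock);

            return bio;
    }

    static void example_wq_work(struct work_struct *work)
    {
            struct example_ctx *ctx = container_of(work, struct example_ctx, worker);
            struct bio *bio;

            while ((bio = pop_deferred_bio(ctx))) {
                    submit_bio_noacct(bio);
                    /* Yield once per item so a long backlog can't hog the CPU. */
                    cond_resched();
            }
    }

The actual commits take this shape but simply add cond_resched() calls inside
the existing loops of dm.c, dm-thin.c and dm-cache-target.c rather than
introducing new helpers.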
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-audit.c2
-rw-r--r--drivers/md/dm-bio-prison-v1.c19
-rw-r--r--drivers/md/dm-bio-prison-v1.h1
-rw-r--r--drivers/md/dm-bio-prison-v2.c15
-rw-r--r--drivers/md/dm-bio-prison-v2.h11
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c130
-rw-r--r--drivers/md/dm-builtin.c3
-rw-r--r--drivers/md/dm-cache-background-tracker.c17
-rw-r--r--drivers/md/dm-cache-background-tracker.h47
-rw-r--r--drivers/md/dm-cache-block-types.h1
-rw-r--r--drivers/md/dm-cache-metadata.c74
-rw-r--r--drivers/md/dm-cache-metadata.h5
-rw-r--r--drivers/md/dm-cache-policy-internal.h14
-rw-r--r--drivers/md/dm-cache-policy-smq.c166
-rw-r--r--drivers/md/dm-cache-policy.c3
-rw-r--r--drivers/md/dm-cache-policy.h7
-rw-r--r--drivers/md/dm-cache-target.c141
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c117
-rw-r--r--drivers/md/dm-delay.c7
-rw-r--r--drivers/md/dm-dust.c2
-rw-r--r--drivers/md/dm-ebs-target.c5
-rw-r--r--drivers/md/dm-era-target.c122
-rw-r--r--drivers/md/dm-exception-store.c7
-rw-r--r--drivers/md/dm-exception-store.h57
-rw-r--r--drivers/md/dm-flakey.c58
-rw-r--r--drivers/md/dm-ima.c5
-rw-r--r--drivers/md/dm-ima.h7
-rw-r--r--drivers/md/dm-init.c5
-rw-r--r--drivers/md/dm-integrity.c541
-rw-r--r--drivers/md/dm-io-rewind.c8
-rw-r--r--drivers/md/dm-io-tracker.h1
-rw-r--r--drivers/md/dm-io.c88
-rw-r--r--drivers/md/dm-ioctl.c168
-rw-r--r--drivers/md/dm-kcopyd.c61
-rw-r--r--drivers/md/dm-linear.c5
-rw-r--r--drivers/md/dm-log-userspace-base.c15
-rw-r--r--drivers/md/dm-log-userspace-transfer.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.h1
-rw-r--r--drivers/md/dm-log-writes.c23
-rw-r--r--drivers/md/dm-log.c65
-rw-r--r--drivers/md/dm-mpath.c125
-rw-r--r--drivers/md/dm-mpath.h3
-rw-r--r--drivers/md/dm-path-selector.c4
-rw-r--r--drivers/md/dm-path-selector.h28
-rw-r--r--drivers/md/dm-ps-historical-service-time.c2
-rw-r--r--drivers/md/dm-ps-io-affinity.c6
-rw-r--r--drivers/md/dm-ps-queue-length.c15
-rw-r--r--drivers/md/dm-ps-round-robin.c22
-rw-r--r--drivers/md/dm-ps-service-time.c26
-rw-r--r--drivers/md/dm-raid.c35
-rw-r--r--drivers/md/dm-raid1.c92
-rw-r--r--drivers/md/dm-region-hash.c29
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h3
-rw-r--r--drivers/md/dm-snap-persistent.c48
-rw-r--r--drivers/md/dm-snap-transient.c18
-rw-r--r--drivers/md/dm-snap.c91
-rw-r--r--drivers/md/dm-stats.c103
-rw-r--r--drivers/md/dm-stats.h6
-rw-r--r--drivers/md/dm-stripe.c53
-rw-r--r--drivers/md/dm-switch.c47
-rw-r--r--drivers/md/dm-sysfs.c12
-rw-r--r--drivers/md/dm-table.c58
-rw-r--r--drivers/md/dm-target.c6
-rw-r--r--drivers/md/dm-thin-metadata.c66
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c88
-rw-r--r--drivers/md/dm-uevent.c6
-rw-r--r--drivers/md/dm-uevent.h6
-rw-r--r--drivers/md/dm-unstripe.c1
-rw-r--r--drivers/md/dm-verity-fec.c30
-rw-r--r--drivers/md/dm-verity-fec.h18
-rw-r--r--drivers/md/dm-verity-target.c83
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/dm-verity-verify-sig.h2
-rw-r--r--drivers/md/dm-verity.h8
-rw-r--r--drivers/md/dm-writecache.c171
-rw-r--r--drivers/md/dm-zero.c1
-rw-r--r--drivers/md/dm-zone.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c22
-rw-r--r--drivers/md/dm-zoned-target.c1
-rw-r--r--drivers/md/dm.c116
-rw-r--r--drivers/md/dm.h16
-rw-r--r--drivers/md/persistent-data/dm-array.c82
-rw-r--r--drivers/md/persistent-data/dm-array.h3
-rw-r--r--drivers/md/persistent-data/dm-bitset.c14
-rw-r--r--drivers/md/persistent-data/dm-bitset.h1
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c32
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c52
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c21
-rw-r--r--drivers/md/persistent-data/dm-btree.c130
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/md/persistent-data/dm-persistent-data-internal.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c52
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h11
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c13
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c24
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map.h1
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c18
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h3
107 files changed, 2243 insertions, 1794 deletions
diff --git a/drivers/md/dm-audit.c b/drivers/md/dm-audit.c
index 3049dfe67e50..2e979eeb1116 100644
--- a/drivers/md/dm-audit.c
+++ b/drivers/md/dm-audit.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Creating audit records for mapped devices.
*
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 1f8f98efd97a..c4c05d5d8909 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -77,9 +78,9 @@ static void __setup_new_cell(struct dm_cell_key *key,
struct bio *holder,
struct dm_bio_prison_cell *cell)
{
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->holder = holder;
- bio_list_init(&cell->bios);
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = holder;
+ bio_list_init(&cell->bios);
}
static int cmp_keys(struct dm_cell_key *lhs,
@@ -285,14 +286,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
struct dm_deferred_entry {
struct dm_deferred_set *ds;
- unsigned count;
+ unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
- unsigned current_entry;
- unsigned sweeper;
+ unsigned int current_entry;
+ unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
@@ -338,7 +339,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
-static unsigned ds_next(unsigned index)
+static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
@@ -373,7 +374,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
- unsigned next_entry;
+ unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
@@ -432,7 +433,7 @@ static int __init dm_bio_prison_init(void)
return 0;
- bad:
+bad:
while (i--)
_exits[i]();
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index cec52ac5e1ae..dfbf1e94cb75 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 9dec3b61cf70..fd852981ef9c 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2017 Red Hat, Inc.
*
@@ -148,7 +149,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
static bool __get(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell)
@@ -171,7 +172,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
@@ -194,7 +195,7 @@ static bool __put(struct dm_bio_prison_v2 *prison,
// FIXME: shared locks granted above the lock level could starve this
if (!cell->shared_count) {
- if (cell->exclusive_lock){
+ if (cell->exclusive_lock) {
if (cell->quiesce_continuation) {
queue_work(prison->wq, cell->quiesce_continuation);
cell->quiesce_continuation = NULL;
@@ -224,7 +225,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
static int __lock(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@@ -255,7 +256,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level)
+ unsigned int new_lock_level)
{
if (!cell->exclusive_lock)
return -EINVAL;
@@ -302,7 +303,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level)
+ unsigned int new_lock_level)
{
int r;
diff --git a/drivers/md/dm-bio-prison-v2.h b/drivers/md/dm-bio-prison-v2.h
index 6e04234268db..ce2cca5ef2f5 100644
--- a/drivers/md/dm-bio-prison-v2.h
+++ b/drivers/md/dm-bio-prison-v2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
@@ -44,8 +45,8 @@ struct dm_cell_key_v2 {
struct dm_bio_prison_cell_v2 {
// FIXME: pack these
bool exclusive_lock;
- unsigned exclusive_level;
- unsigned shared_count;
+ unsigned int exclusive_level;
+ unsigned int shared_count;
struct work_struct *quiesce_continuation;
struct rb_node node;
@@ -86,7 +87,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
*/
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@@ -114,7 +115,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@@ -132,7 +133,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level);
+ unsigned int new_lock_level);
/*
* Adds any held bios to the bio list.
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 745e3ab4aa0a..04d2f0cda324 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index bb786c39545e..cf077f9b30c3 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009-2011 Red Hat, Inc.
*
@@ -89,18 +90,18 @@ struct dm_bufio_client {
unsigned long n_buffers[LIST_SIZE];
struct block_device *bdev;
- unsigned block_size;
+ unsigned int block_size;
s8 sectors_per_block_bits;
- void (*alloc_callback)(struct dm_buffer *);
- void (*write_callback)(struct dm_buffer *);
+ void (*alloc_callback)(struct dm_buffer *buf);
+ void (*write_callback)(struct dm_buffer *buf);
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
struct list_head reserved_buffers;
- unsigned need_reserved_buffers;
+ unsigned int need_reserved_buffers;
- unsigned minimum_buffers;
+ unsigned int minimum_buffers;
struct rb_root buffer_tree;
wait_queue_head_t free_buffer_wait;
@@ -145,17 +146,17 @@ struct dm_buffer {
unsigned char list_mode; /* LIST_* */
blk_status_t read_error;
blk_status_t write_error;
- unsigned accessed;
- unsigned hold_count;
+ unsigned int accessed;
+ unsigned int hold_count;
unsigned long state;
unsigned long last_accessed;
- unsigned dirty_start;
- unsigned dirty_end;
- unsigned write_start;
- unsigned write_end;
+ unsigned int dirty_start;
+ unsigned int dirty_end;
+ unsigned int write_start;
+ unsigned int write_end;
struct dm_bufio_client *c;
struct list_head write_list;
- void (*end_io)(struct dm_buffer *, blk_status_t);
+ void (*end_io)(struct dm_buffer *buf, blk_status_t stat);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
unsigned int stack_len;
@@ -215,12 +216,12 @@ static DEFINE_SPINLOCK(global_spinlock);
static LIST_HEAD(global_queue);
-static unsigned long global_num = 0;
+static unsigned long global_num;
/*
* Buffers are freed after this timeout
*/
-static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@@ -258,9 +259,11 @@ static void buffer_record_stack(struct dm_buffer *b)
}
#endif
-/*----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* A red/black tree acts as an index for all the buffers.
- *--------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
struct rb_node *n = c->buffer_tree.rb_node;
@@ -438,7 +441,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
* as if GFP_NOIO was specified.
*/
if (gfp_mask & __GFP_NORETRY) {
- unsigned noio_flag = memalloc_noio_save();
+ unsigned int noio_flag = memalloc_noio_save();
void *ptr = __vmalloc(c->block_size, gfp_mask);
memalloc_noio_restore(noio_flag);
@@ -561,7 +564,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
b->last_accessed = jiffies;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
* Submit I/O on the buffer.
*
* Bio interface is faster but it has some problems:
@@ -577,7 +581,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
* rejects the bio because it is too large, use dm-io layer to do the I/O.
* The dm-io layer splits the I/O into multiple requests, avoiding the above
* shortcomings.
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
/*
* dm-io completion routine. It just calls b->bio.bi_end_io, pretending
@@ -591,7 +596,7 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned n_sectors, unsigned offset)
+ unsigned int n_sectors, unsigned int offset)
{
int r;
struct dm_io_request io_req = {
@@ -623,17 +628,18 @@ static void bio_complete(struct bio *bio)
{
struct dm_buffer *b = bio->bi_private;
blk_status_t status = bio->bi_status;
+
bio_uninit(bio);
kfree(bio);
b->end_io(b, status);
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned n_sectors, unsigned offset)
+ unsigned int n_sectors, unsigned int offset)
{
struct bio *bio;
char *ptr;
- unsigned vec_size, len;
+ unsigned int vec_size, len;
vec_size = b->c->block_size >> PAGE_SHIFT;
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
@@ -654,7 +660,8 @@ dmio:
len = n_sectors << SECTOR_SHIFT;
do {
- unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
+ unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
+
if (!bio_add_page(bio, virt_to_page(ptr), this_step,
offset_in_page(ptr))) {
bio_put(bio);
@@ -684,9 +691,9 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
static void submit_io(struct dm_buffer *b, enum req_op op,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
- unsigned n_sectors;
+ unsigned int n_sectors;
sector_t sector;
- unsigned offset, end;
+ unsigned int offset, end;
b->end_io = end_io;
@@ -716,9 +723,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
use_dmio(b, op, sector, n_sectors, offset);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Writing dirty buffers
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* The endio routine for write.
@@ -775,6 +784,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
static void __flush_write_list(struct list_head *write_list)
{
struct blk_plug plug;
+
blk_start_plug(&plug);
while (!list_empty(write_list)) {
struct dm_buffer *b =
@@ -998,9 +1008,11 @@ static void __check_watermark(struct dm_bufio_client *c,
__write_dirty_buffers_async(c, 1, write_list);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Getting a buffer
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
enum new_flag nf, int *need_submit,
@@ -1156,7 +1168,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
EXPORT_SYMBOL_GPL(dm_bufio_new);
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned n_blocks)
+ sector_t block, unsigned int n_blocks)
{
struct blk_plug plug;
@@ -1170,6 +1182,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
for (; n_blocks--; block++) {
int need_submit;
struct dm_buffer *b;
+
b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
&write_list);
if (unlikely(!list_empty(&write_list))) {
@@ -1232,7 +1245,7 @@ void dm_bufio_release(struct dm_buffer *b)
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
- unsigned start, unsigned end)
+ unsigned int start, unsigned int end)
{
struct dm_bufio_client *c = b->c;
@@ -1454,6 +1467,7 @@ retry:
__link_buffer(b, new_block, LIST_DIRTY);
} else {
sector_t old_block;
+
wait_on_bit_lock_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
/*
@@ -1529,13 +1543,13 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{
c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{
return c->block_size;
}
@@ -1544,6 +1558,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
sector_t s = bdev_nr_sectors(c->bdev);
+
if (s >= c->start)
s -= c->start;
else
@@ -1659,10 +1674,12 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
+
if (likely(c->sectors_per_block_bits >= 0))
retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
else
retain_bytes /= c->block_size;
+
return retain_bytes;
}
@@ -1734,15 +1751,15 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
/*
* Create the buffering interface
*/
-struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
- unsigned reserved_buffers, unsigned aux_size,
+struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
void (*write_callback)(struct dm_buffer *),
unsigned int flags)
{
int r;
struct dm_bufio_client *c;
- unsigned i;
+ unsigned int i;
char slab_name[27];
if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
@@ -1796,8 +1813,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (block_size <= KMALLOC_MAX_SIZE &&
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
- unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
- snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+ unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
+
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_cache) {
@@ -1806,9 +1824,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
}
}
if (aux_size)
- snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
else
- snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
0, SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_buffer) {
@@ -1833,7 +1851,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1872,7 +1890,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_create);
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
- unsigned i;
+ unsigned int i;
drop_buffers(c);
@@ -1920,9 +1938,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
-static unsigned get_max_age_hz(void)
+static unsigned int get_max_age_hz(void)
{
- unsigned max_age = READ_ONCE(dm_bufio_max_age);
+ unsigned int max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
@@ -1973,7 +1991,7 @@ static void do_global_cleanup(struct work_struct *w)
struct dm_bufio_client *locked_client = NULL;
struct dm_bufio_client *current_client;
struct dm_buffer *b;
- unsigned spinlock_hold_count;
+ unsigned int spinlock_hold_count;
unsigned long threshold = dm_bufio_cache_size -
dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
unsigned long loops = global_num * 2;
@@ -2059,9 +2077,11 @@ static void work_fn(struct work_struct *w)
DM_BUFIO_WORK_TIMER_SECS * HZ);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Module setup
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This is called only once for the whole dm_bufio module.
@@ -2145,28 +2165,28 @@ static void __exit dm_bufio_exit(void)
module_init(dm_bufio_init)
module_exit(dm_bufio_exit)
-module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
+module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
-module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
+module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
-module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
-module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
+module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
-module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
+module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
-module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
+module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
-module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
+module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
-module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
+module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index 8eb52e425141..e51076ea629e 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
#include "dm-core.h"
/*
@@ -45,5 +45,4 @@ void dm_kobject_release(struct kobject *kobj)
{
complete(dm_get_completion_from_kobject(kobj));
}
-
EXPORT_SYMBOL(dm_kobject_release);
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 84814e819e4c..9c5308298cf1 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
@@ -17,7 +18,7 @@ struct bt_work {
};
struct background_tracker {
- unsigned max_work;
+ unsigned int max_work;
atomic_t pending_promotes;
atomic_t pending_writebacks;
atomic_t pending_demotes;
@@ -29,7 +30,7 @@ struct background_tracker {
struct kmem_cache *work_cache;
};
-struct background_tracker *btracker_create(unsigned max_work)
+struct background_tracker *btracker_create(unsigned int max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
@@ -60,6 +61,14 @@ EXPORT_SYMBOL_GPL(btracker_create);
void btracker_destroy(struct background_tracker *b)
{
+ struct bt_work *w, *tmp;
+
+ BUG_ON(!list_empty(&b->issued));
+ list_for_each_entry_safe (w, tmp, &b->queued, list) {
+ list_del(&w->list);
+ kmem_cache_free(b->work_cache, w);
+ }
+
kmem_cache_destroy(b->work_cache);
kfree(b);
}
@@ -147,13 +156,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
}
}
-unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
+unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
-unsigned btracker_nr_demotions_queued(struct background_tracker *b)
+unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_demotes);
}
diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h
index 27ab90dbc275..5b8f5c667b81 100644
--- a/drivers/md/dm-cache-background-tracker.h
+++ b/drivers/md/dm-cache-background-tracker.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
@@ -12,19 +13,44 @@
/*----------------------------------------------------------------*/
+/*
+ * The cache policy decides what background work should be performed,
+ * such as promotions, demotions and writebacks. The core cache target
+ * is in charge of performing the work, and does so when it sees fit.
+ *
+ * The background_tracker acts as a go between. Keeping track of future
+ * work that the policy has decided upon, and handing (issuing) it to
+ * the core target when requested.
+ *
+ * There is no locking in this, so calls will probably need to be
+ * protected with a spinlock.
+ */
+
struct background_work;
struct background_tracker;
/*
- * FIXME: discuss lack of locking in all methods.
+ * Create a new tracker, it will not be able to queue more than
+ * 'max_work' entries.
+ */
+struct background_tracker *btracker_create(unsigned int max_work);
+
+/*
+ * Destroy the tracker. No issued, but not complete, work should
+ * exist when this is called. It is fine to have queued but unissued
+ * work.
*/
-struct background_tracker *btracker_create(unsigned max_work);
void btracker_destroy(struct background_tracker *b);
-unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
-unsigned btracker_nr_demotions_queued(struct background_tracker *b);
+unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
+unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
/*
+ * Queue some work within the tracker. 'work' should point to the work
+ * to queue, this will be copied (ownership doesn't pass). If pwork
+ * is not NULL then it will be set to point to the tracker's internal
+ * copy of the work.
+ *
* returns -EINVAL iff the work is already queued. -ENOMEM if the work
* couldn't be queued for another reason.
*/
@@ -33,11 +59,20 @@ int btracker_queue(struct background_tracker *b,
struct policy_work **pwork);
/*
+ * Hands out the next piece of work to be performed.
* Returns -ENODATA if there's no work.
*/
int btracker_issue(struct background_tracker *b, struct policy_work **work);
-void btracker_complete(struct background_tracker *b,
- struct policy_work *op);
+
+/*
+ * Informs the tracker that the work has been completed and it may forget
+ * about it.
+ */
+void btracker_complete(struct background_tracker *b, struct policy_work *op);
+
+/*
+ * Predicate to see if an origin block is already scheduled for promotion.
+ */
bool btracker_promotion_already_present(struct background_tracker *b,
dm_oblock_t oblock);
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index 389c9e8ac785..57d2534c59ce 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 83a5975bcc72..acffed750e3e 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -104,7 +105,7 @@ struct dm_cache_metadata {
refcount_t ref_count;
struct list_head list;
- unsigned version;
+ unsigned int version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -129,7 +130,7 @@ struct dm_cache_metadata {
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
- unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
+ unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
@@ -162,10 +163,11 @@ struct dm_cache_metadata {
struct dm_bitset_cursor dirty_cursor;
};
-/*-------------------------------------------------------------------
+/*
+ *-----------------------------------------------------------------
* superblock validator
- *-----------------------------------------------------------------*/
-
+ *-----------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 9031977
static void sb_prepare_for_write(struct dm_block_validator *v,
@@ -201,15 +203,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: wanted %llu",
- le64_to_cpu(disk_super->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: wanted %llu",
- le64_to_cpu(disk_super->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->magic),
(unsigned long long)CACHE_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -218,8 +220,8 @@ static int sb_check(struct dm_block_validator *v,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
@@ -260,10 +262,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -533,6 +535,7 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
bool may_format_device)
{
int r;
+
cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
@@ -566,6 +569,7 @@ static void update_flags(struct cache_disk_superblock *disk_super,
flags_mutator mutator)
{
uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+
disk_super->flags = cpu_to_le32(sb_flags);
}
@@ -727,18 +731,20 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
*/
#define FLAGS_MASK ((1 << 16) - 1)
-static __le64 pack_value(dm_oblock_t block, unsigned flags)
+static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
+
value <<= 16;
value = value | (flags & FLAGS_MASK);
return cpu_to_le64(value);
}
-static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
+
*block = to_oblock(b);
*flags = value & FLAGS_MASK;
}
@@ -749,7 +755,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
@@ -810,7 +816,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
@@ -855,7 +861,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
@@ -890,7 +896,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
int r;
__le64 value;
dm_oblock_t ob;
- unsigned flags;
+ unsigned int flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
if (r)
@@ -1009,13 +1015,13 @@ static bool cmd_write_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_write_lock((cmd))) \
return -EINVAL; \
- } while(0)
+ } while (0)
#define WRITE_LOCK_VOID(cmd) \
do { \
if (!cmd_write_lock((cmd))) \
return; \
- } while(0)
+ } while (0)
#define WRITE_UNLOCK(cmd) \
up_write(&(cmd)->root_lock)
@@ -1034,13 +1040,13 @@ static bool cmd_read_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_read_lock((cmd))) \
return -EINVAL; \
- } while(0)
+ } while (0)
#define READ_LOCK_VOID(cmd) \
do { \
if (!cmd_read_lock((cmd))) \
return; \
- } while(0)
+ } while (0)
#define READ_UNLOCK(cmd) \
up_read(&(cmd)->root_lock)
@@ -1252,6 +1258,7 @@ static int __insert(struct dm_cache_metadata *cmd,
{
int r;
__le64 value = pack_value(oblock, M_VALID);
+
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
@@ -1288,7 +1295,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
const char *policy_name = dm_cache_policy_get_name(policy);
- const unsigned *policy_version = dm_cache_policy_get_version(policy);
+ const unsigned int *policy_version = dm_cache_policy_get_version(policy);
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
/*
@@ -1339,7 +1346,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@@ -1381,7 +1388,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@@ -1513,7 +1520,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
__le64 value;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
memcpy(&value, leaf, sizeof(value));
unpack_value(value, &oblock, &flags);
@@ -1547,7 +1554,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
int r;
- unsigned flags;
+ unsigned int flags;
dm_oblock_t oblock;
__le64 value;
@@ -1574,10 +1581,11 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
}
-static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r;
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
@@ -1594,7 +1602,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
return 0;
}
-static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r = 0;
@@ -1613,7 +1621,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
}
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
- unsigned nr_bits,
+ unsigned int nr_bits,
unsigned long *bits)
{
int r;
@@ -1712,7 +1720,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
int r;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
- const unsigned *policy_version = dm_cache_policy_get_version(policy);
+ const unsigned int *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 0905f2c1615e..57afc7047947 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -60,7 +61,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version);
+ unsigned int metadata_version);
void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
@@ -96,7 +97,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
void *context);
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
- unsigned nr_bits, unsigned long *bits);
+ unsigned int nr_bits, unsigned long *bits);
struct dm_cache_statistics {
uint32_t read_hits;
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 56f0a23f698c..476a4f6794fc 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -85,9 +86,10 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
}
static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
+
if (p->emit_config_values)
return p->emit_config_values(p, result, maxlen, sz_ptr);
@@ -112,20 +114,22 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
/*
* Some utility functions commonly used by policies and the core target.
*/
-static inline size_t bitset_size_in_bytes(unsigned nr_entries)
+static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
{
return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}
-static inline unsigned long *alloc_bitset(unsigned nr_entries)
+static inline unsigned long *alloc_bitset(unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
+
return vzalloc(s);
}
-static inline void clear_bitset(void *bitset, unsigned nr_entries)
+static inline void clear_bitset(void *bitset, unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
+
memset(bitset, 0, s);
}
@@ -154,7 +158,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
*/
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index a3d281fc14c3..493a8715dc8f 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Red Hat. All rights reserved.
*
@@ -23,12 +24,12 @@
/*
* Safe division functions that return zero on divide by zero.
*/
-static unsigned safe_div(unsigned n, unsigned d)
+static unsigned int safe_div(unsigned int n, unsigned int d)
{
return d ? n / d : 0u;
}
-static unsigned safe_mod(unsigned n, unsigned d)
+static unsigned int safe_mod(unsigned int n, unsigned int d)
{
return d ? n % d : 0u;
}
@@ -36,10 +37,10 @@ static unsigned safe_mod(unsigned n, unsigned d)
/*----------------------------------------------------------------*/
struct entry {
- unsigned hash_next:28;
- unsigned prev:28;
- unsigned next:28;
- unsigned level:6;
+ unsigned int hash_next:28;
+ unsigned int prev:28;
+ unsigned int next:28;
+ unsigned int level:6;
bool dirty:1;
bool allocated:1;
bool sentinel:1;
@@ -62,7 +63,7 @@ struct entry_space {
struct entry *end;
};
-static int space_init(struct entry_space *es, unsigned nr_entries)
+static int space_init(struct entry_space *es, unsigned int nr_entries)
{
if (!nr_entries) {
es->begin = es->end = NULL;
@@ -82,7 +83,7 @@ static void space_exit(struct entry_space *es)
vfree(es->begin);
}
-static struct entry *__get_entry(struct entry_space *es, unsigned block)
+static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
struct entry *e;
@@ -92,13 +93,13 @@ static struct entry *__get_entry(struct entry_space *es, unsigned block)
return e;
}
-static unsigned to_index(struct entry_space *es, struct entry *e)
+static unsigned int to_index(struct entry_space *es, struct entry *e)
{
BUG_ON(e < es->begin || e >= es->end);
return e - es->begin;
}
-static struct entry *to_entry(struct entry_space *es, unsigned block)
+static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
if (block == INDEXER_NULL)
return NULL;
@@ -109,8 +110,8 @@ static struct entry *to_entry(struct entry_space *es, unsigned block)
/*----------------------------------------------------------------*/
struct ilist {
- unsigned nr_elts; /* excluding sentinel entries */
- unsigned head, tail;
+ unsigned int nr_elts; /* excluding sentinel entries */
+ unsigned int head, tail;
};
static void l_init(struct ilist *l)
@@ -252,23 +253,23 @@ static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
struct queue {
struct entry_space *es;
- unsigned nr_elts;
- unsigned nr_levels;
+ unsigned int nr_elts;
+ unsigned int nr_levels;
struct ilist qs[MAX_LEVELS];
/*
* We maintain a count of the number of entries we would like in each
* level.
*/
- unsigned last_target_nr_elts;
- unsigned nr_top_levels;
- unsigned nr_in_top_levels;
- unsigned target_count[MAX_LEVELS];
+ unsigned int last_target_nr_elts;
+ unsigned int nr_top_levels;
+ unsigned int nr_in_top_levels;
+ unsigned int target_count[MAX_LEVELS];
};
-static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
+static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
- unsigned i;
+ unsigned int i;
q->es = es;
q->nr_elts = 0;
@@ -284,7 +285,7 @@ static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
q->nr_in_top_levels = 0u;
}
-static unsigned q_size(struct queue *q)
+static unsigned int q_size(struct queue *q)
{
return q->nr_elts;
}
@@ -332,9 +333,9 @@ static void q_del(struct queue *q, struct entry *e)
/*
* Return the oldest entry of the lowest populated level.
*/
-static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
+static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
- unsigned level;
+ unsigned int level;
struct entry *e;
max_level = min(max_level, q->nr_levels);
@@ -369,7 +370,7 @@ static struct entry *q_pop(struct queue *q)
* used by redistribute, so we know this is true. It also doesn't adjust
* the q->nr_elts count.
*/
-static struct entry *__redist_pop_from(struct queue *q, unsigned level)
+static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
struct entry *e;
@@ -383,9 +384,10 @@ static struct entry *__redist_pop_from(struct queue *q, unsigned level)
return NULL;
}
-static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
+static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
+ unsigned int lbegin, unsigned int lend)
{
- unsigned level, nr_levels, entries_per_level, remainder;
+ unsigned int level, nr_levels, entries_per_level, remainder;
BUG_ON(lbegin > lend);
BUG_ON(lend > q->nr_levels);
@@ -426,7 +428,7 @@ static void q_set_targets(struct queue *q)
static void q_redistribute(struct queue *q)
{
- unsigned target, level;
+ unsigned int target, level;
struct ilist *l, *l_above;
struct entry *e;
@@ -467,12 +469,12 @@ static void q_redistribute(struct queue *q)
}
}
-static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
+static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
struct entry *s1, struct entry *s2)
{
struct entry *de;
- unsigned sentinels_passed = 0;
- unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
+ unsigned int sentinels_passed = 0;
+ unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
/* try and find an entry to swap with */
if (extra_levels && (e->level < q->nr_levels - 1u)) {
@@ -512,9 +514,9 @@ static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
#define EIGHTH (1u << (FP_SHIFT - 3u))
struct stats {
- unsigned hit_threshold;
- unsigned hits;
- unsigned misses;
+ unsigned int hit_threshold;
+ unsigned int hits;
+ unsigned int misses;
};
enum performance {
@@ -523,7 +525,7 @@ enum performance {
Q_WELL
};
-static void stats_init(struct stats *s, unsigned nr_levels)
+static void stats_init(struct stats *s, unsigned int nr_levels)
{
s->hit_threshold = (nr_levels * 3u) / 4u;
s->hits = 0u;
@@ -535,7 +537,7 @@ static void stats_reset(struct stats *s)
s->hits = s->misses = 0u;
}
-static void stats_level_accessed(struct stats *s, unsigned level)
+static void stats_level_accessed(struct stats *s, unsigned int level)
{
if (level >= s->hit_threshold)
s->hits++;
@@ -556,7 +558,7 @@ static void stats_miss(struct stats *s)
*/
static enum performance stats_assess(struct stats *s)
{
- unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
+ unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
if (confidence < SIXTEENTH)
return Q_POOR;
@@ -573,16 +575,16 @@ static enum performance stats_assess(struct stats *s)
struct smq_hash_table {
struct entry_space *es;
unsigned long long hash_bits;
- unsigned *buckets;
+ unsigned int *buckets;
};
/*
* All cache entries are stored in a chained hash table. To save space we
* use indexing again, and only store indexes to the next entry.
*/
-static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
+static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
- unsigned i, nr_buckets;
+ unsigned int i, nr_buckets;
ht->es = es;
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
@@ -603,7 +605,7 @@ static void h_exit(struct smq_hash_table *ht)
vfree(ht->buckets);
}
-static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
+static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
return to_entry(ht->es, ht->buckets[bucket]);
}
@@ -613,7 +615,7 @@ static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
return to_entry(ht->es, e->hash_next);
}
-static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
+static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
e->hash_next = ht->buckets[bucket];
ht->buckets[bucket] = to_index(ht->es, e);
@@ -621,11 +623,12 @@ static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry
static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
- unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+
__h_insert(ht, h, e);
}
-static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
+static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
struct entry **prev)
{
struct entry *e;
@@ -641,7 +644,7 @@ static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock
return NULL;
}
-static void __h_unlink(struct smq_hash_table *ht, unsigned h,
+static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
struct entry *e, struct entry *prev)
{
if (prev)
@@ -656,7 +659,7 @@ static void __h_unlink(struct smq_hash_table *ht, unsigned h,
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
struct entry *e, *prev;
- unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
e = __h_lookup(ht, h, oblock, &prev);
if (e && prev) {
@@ -673,7 +676,7 @@ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
- unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
struct entry *prev;
/*
@@ -689,16 +692,16 @@ static void h_remove(struct smq_hash_table *ht, struct entry *e)
struct entry_alloc {
struct entry_space *es;
- unsigned begin;
+ unsigned int begin;
- unsigned nr_allocated;
+ unsigned int nr_allocated;
struct ilist free;
};
static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
- unsigned begin, unsigned end)
+ unsigned int begin, unsigned int end)
{
- unsigned i;
+ unsigned int i;
ea->es = es;
ea->nr_allocated = 0u;
@@ -742,7 +745,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
/*
* This assumes the cblock hasn't already been allocated.
*/
-static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
+static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
struct entry *e = __get_entry(ea->es, ea->begin + i);
@@ -770,12 +773,12 @@ static bool allocator_empty(struct entry_alloc *ea)
return l_empty(&ea->free);
}
-static unsigned get_index(struct entry_alloc *ea, struct entry *e)
+static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
return to_index(ea->es, e) - ea->begin;
}
-static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
+static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
return __get_entry(ea->es, ea->begin + index);
}
@@ -800,9 +803,9 @@ struct smq_policy {
sector_t cache_block_size;
sector_t hotspot_block_size;
- unsigned nr_hotspot_blocks;
- unsigned cache_blocks_per_hotspot_block;
- unsigned hotspot_level_jump;
+ unsigned int nr_hotspot_blocks;
+ unsigned int cache_blocks_per_hotspot_block;
+ unsigned int hotspot_level_jump;
struct entry_space es;
struct entry_alloc writeback_sentinel_alloc;
@@ -831,7 +834,7 @@ struct smq_policy {
* Keeps track of time, incremented by the core. We use this to
* avoid attributing multiple hits within the same tick.
*/
- unsigned tick;
+ unsigned int tick;
/*
* The hash tables allows us to quickly find an entry by origin
@@ -846,8 +849,8 @@ struct smq_policy {
bool current_demote_sentinels;
unsigned long next_demote_period;
- unsigned write_promote_level;
- unsigned read_promote_level;
+ unsigned int write_promote_level;
+ unsigned int read_promote_level;
unsigned long next_hotspot_period;
unsigned long next_cache_period;
@@ -859,24 +862,24 @@ struct smq_policy {
/*----------------------------------------------------------------*/
-static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
+static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}
-static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
+static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}
-static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
+static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}
static void __update_writeback_sentinels(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct queue *q = &mq->dirty;
struct entry *sentinel;
@@ -889,7 +892,7 @@ static void __update_writeback_sentinels(struct smq_policy *mq)
static void __update_demote_sentinels(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct queue *q = &mq->clean;
struct entry *sentinel;
@@ -917,7 +920,7 @@ static void update_sentinels(struct smq_policy *mq)
static void __sentinels_init(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct entry *sentinel;
for (level = 0; level < NR_CACHE_LEVELS; level++) {
@@ -1008,7 +1011,7 @@ static void requeue(struct smq_policy *mq, struct entry *e)
}
}
-static unsigned default_promote_level(struct smq_policy *mq)
+static unsigned int default_promote_level(struct smq_policy *mq)
{
/*
* The promote level depends on the current performance of the
@@ -1030,9 +1033,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
};
- unsigned hits = mq->cache_stats.hits;
- unsigned misses = mq->cache_stats.misses;
- unsigned index = safe_div(hits << 4u, hits + misses);
+ unsigned int hits = mq->cache_stats.hits;
+ unsigned int misses = mq->cache_stats.misses;
+ unsigned int index = safe_div(hits << 4u, hits + misses);
return table[index];
}
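
The hunk above only widens the types, but the arithmetic it touches rewards a worked example: the hit ratio is scaled into a 0..16 index over the 17-entry table, so the promote level tracks how well the cache is currently performing. A quick check of the math (values illustrative, and safe_div() assumed to return 0 on a zero denominator):

	/*
	 * index = (hits << 4) / (hits + misses)
	 *
	 *   hits = 750, misses = 250  ->  (750 << 4) / 1000 = 12, table[12] == 3
	 *   hits = 500, misses = 500  ->  (500 << 4) / 1000 =  8, table[8]  == 7
	 */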
@@ -1042,7 +1045,7 @@ static void update_promote_levels(struct smq_policy *mq)
* If there are unused cache entries then we want to be really
* eager to promote.
*/
- unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
+ unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
@@ -1124,7 +1127,7 @@ static void end_cache_period(struct smq_policy *mq)
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u
-static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
+static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{
return from_cblock(mq->cache_size) * p / 100u;
}
@@ -1150,7 +1153,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
static bool free_target_met(struct smq_policy *mq)
{
- unsigned nr_free;
+ unsigned int nr_free;
nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
@@ -1300,7 +1303,7 @@ static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
- unsigned hi;
+ unsigned int hi;
dm_oblock_t hb = to_hblock(mq, b);
struct entry *e = h_lookup(&mq->hotspot_table, hb);
@@ -1549,7 +1552,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
spin_unlock_irqrestore(&mq->lock, flags);
}
-static unsigned random_level(dm_cblock_t cblock)
+static unsigned int random_level(dm_cblock_t cblock)
{
return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
@@ -1631,6 +1634,7 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
struct smq_policy *mq = to_smq_policy(p);
+
mq->migrations_allowed = allow;
}
@@ -1660,7 +1664,7 @@ static int mq_set_config_value(struct dm_cache_policy *p,
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
@@ -1699,16 +1703,16 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
static bool too_many_hotspot_blocks(sector_t origin_size,
sector_t hotspot_block_size,
- unsigned nr_hotspot_blocks)
+ unsigned int nr_hotspot_blocks)
{
return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}
static void calc_hotspot_params(sector_t origin_size,
sector_t cache_block_size,
- unsigned nr_cache_blocks,
+ unsigned int nr_cache_blocks,
sector_t *hotspot_block_size,
- unsigned *nr_hotspot_blocks)
+ unsigned int *nr_hotspot_blocks)
{
*hotspot_block_size = cache_block_size * 16u;
*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
@@ -1724,9 +1728,9 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
bool mimic_mq,
bool migrations_allowed)
{
- unsigned i;
- unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
- unsigned total_sentinels = 2u * nr_sentinels_per_queue;
+ unsigned int i;
+ unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+ unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index c1a3cee99b44..9330d5748895 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -154,7 +155,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
+const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 06eb31af626f..a1eedcc42677 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -75,7 +76,7 @@ struct dm_cache_policy {
* background work.
*/
int (*get_background_work)(struct dm_cache_policy *p, bool idle,
- struct policy_work **result);
+ struct policy_work **result);
/*
* You must pass in the same work pointer that you were given, not
@@ -128,7 +129,7 @@ struct dm_cache_policy {
* Configuration.
*/
int (*emit_config_values)(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr);
+ unsigned int maxlen, ssize_t *sz_ptr);
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
@@ -157,7 +158,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
- unsigned version[CACHE_POLICY_VERSION_SIZE];
+ unsigned int version[CACHE_POLICY_VERSION_SIZE];
/*
* For use by an alias dm_cache_policy_type to point to the
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5e92fac90b67..dbbcfa580078 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -180,15 +181,15 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
- bool commit_scheduled;
+ bool commit_scheduled;
- spin_lock_irq(&b->lock);
- commit_scheduled = b->commit_scheduled;
- bio_list_add(&b->bios, bio);
- spin_unlock_irq(&b->lock);
+ spin_lock_irq(&b->lock);
+ commit_scheduled = b->commit_scheduled;
+ bio_list_add(&b->bios, bio);
+ spin_unlock_irq(&b->lock);
- if (commit_scheduled)
- async_commit(b);
+ if (commit_scheduled)
+ async_commit(b);
}
/*
@@ -275,7 +276,7 @@ enum cache_io_mode {
struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
- unsigned metadata_version;
+ unsigned int metadata_version;
bool discard_passdown:1;
};
@@ -362,7 +363,7 @@ struct cache {
* Rather than reconstructing the table line for the status we just
* save it and regurgitate.
*/
- unsigned nr_ctr_args;
+ unsigned int nr_ctr_args;
const char **ctr_args;
struct dm_kcopyd_client *copier;
@@ -378,7 +379,7 @@ struct cache {
unsigned long *dirty_bitset;
atomic_t nr_dirty;
- unsigned policy_nr_args;
+ unsigned int policy_nr_args;
struct dm_cache_policy *policy;
/*
@@ -409,7 +410,7 @@ struct cache {
struct per_bio_data {
bool tick:1;
- unsigned req_nr:2;
+ unsigned int req_nr:2;
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
@@ -517,20 +518,23 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1
-static unsigned lock_level(struct bio *bio)
+static unsigned int lock_level(struct bio *bio)
{
return bio_data_dir(bio) == WRITE ?
WRITE_LOCK_LEVEL :
READ_WRITE_LOCK_LEVEL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Per bio data
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
BUG_ON(!pb);
return pb;
}
@@ -687,6 +691,7 @@ static void clear_discard(struct cache *cache, dm_dblock_t b)
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
spin_unlock_irq(&cache->lock);
@@ -697,6 +702,7 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
@@ -705,9 +711,11 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
return r;
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
* Remapping
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
bio_set_dev(bio, cache->origin_dev->bdev);
@@ -809,6 +817,7 @@ static void accounted_request(struct cache *cache, struct bio *bio)
static void issue_op(struct bio *bio, void *context)
{
struct cache *cache = context;
+
accounted_request(cache, bio);
}
@@ -833,9 +842,11 @@ static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
remap_to_cache(cache, bio, cblock);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Failure modes
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
return cache->features.mode;
@@ -848,7 +859,7 @@ static const char *cache_device_name(struct cache *cache)
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
- const char *descs[] = {
+ static const char *descs[] = {
"write",
"read-only",
"fail"
@@ -972,13 +983,14 @@ static void update_stats(struct cache_stats *stats, enum policy_operation op)
}
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* Migration processing
*
* Migration covers moving data from the origin device to the cache, or
* vice versa.
- *--------------------------------------------------------------*/
-
+ *---------------------------------------------------------------------
+ */
static void inc_io_migrations(struct cache *cache)
{
atomic_inc(&cache->nr_io_migrations);
@@ -1066,6 +1078,7 @@ static void quiesce(struct dm_cache_migration *mg,
static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
struct continuation *k = container_of(ws, struct continuation, ws);
+
return container_of(k, struct dm_cache_migration, k);
}
@@ -1217,6 +1230,7 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
static void mg_success(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
mg_complete(mg, mg->k.input == 0);
}
@@ -1355,6 +1369,7 @@ static void mg_copy(struct work_struct *ws)
* Fallback to a real full copy after doing some tidying up.
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+
BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
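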
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
@@ -1430,9 +1445,11 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return mg_lock_writes(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* invalidation processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
@@ -1455,12 +1472,15 @@ static void invalidate_complete(struct dm_cache_migration *mg, bool success)
static void invalidate_completed(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
invalidate_complete(mg, !mg->k.input);
}
static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
- int r = policy_invalidate_mapping(cache->policy, cblock);
+ int r;
+
+ r = policy_invalidate_mapping(cache->policy, cblock);
if (!r) {
r = dm_cache_remove_mapping(cache->cmd, cblock);
if (r) {
@@ -1553,9 +1573,11 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return invalidate_lock(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* bio processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
enum busy {
IDLE,
@@ -1763,9 +1785,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
{
dm_dblock_t b, e;
- // FIXME: do we need to lock the region? Or can we just assume the
- // user wont be so foolish as to issue discard concurrently with
- // other IO?
+ /*
+ * FIXME: do we need to lock the region? Or can we just assume the
+ * user won't be so foolish as to issue discard concurrently with
+ * other IO?
+ */
calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
set_discard(cache, b);
@@ -1805,16 +1829,18 @@ static void process_deferred_bios(struct work_struct *ws)
else
commit_needed = process_bio(cache, bio) || commit_needed;
+ cond_resched();
}
if (commit_needed)
schedule_commit(&cache->committer);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Main worker loop
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
static void requeue_deferred_bios(struct cache *cache)
{
struct bio *bio;
@@ -1827,6 +1853,7 @@ static void requeue_deferred_bios(struct cache *cache)
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio);
+ cond_resched();
}
}
@@ -1867,12 +1894,16 @@ static void check_migrations(struct work_struct *ws)
r = mg_start(cache, op, NULL);
if (r)
break;
+
+ cond_resched();
}
}
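
The cond_resched() calls added in these loops (and in process_deferred_bios() and requeue_deferred_bios() above) all follow the same idiom for long-running work items: yield between iterations so a busy workqueue loop cannot monopolise a CPU. A minimal sketch of the pattern, with hypothetical helper names:

	static void long_running_work(struct work_struct *ws)
	{
		while (have_more_items()) {	/* hypothetical helpers */
			process_one_item();
			cond_resched();		/* give the scheduler a chance */
		}
	}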
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This function gets called on the error paths of the constructor, so we
@@ -1880,7 +1911,7 @@ static void check_migrations(struct work_struct *ws)
*/
static void destroy(struct cache *cache)
{
- unsigned i;
+ unsigned int i;
mempool_exit(&cache->migration_pool);
@@ -2120,7 +2151,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
};
int r, mode_ctr = 0;
- unsigned argc;
+ unsigned int argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2540,7 +2571,7 @@ bad:
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
- unsigned i;
+ unsigned int i;
const char **copy;
copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
@@ -2562,7 +2593,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
return 0;
}
-static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r = -EINVAL;
struct cache_args *ca;
@@ -2665,7 +2696,7 @@ static int write_dirty_bitset(struct cache *cache)
static int write_discard_bitset(struct cache *cache)
{
- unsigned i, r;
+ unsigned int i, r;
if (get_cache_mode(cache) >= CM_READ_ONLY)
return -EINVAL;
@@ -2979,11 +3010,11 @@ static void cache_resume(struct dm_target *ti)
}
static void emit_flags(struct cache *cache, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
struct cache_features *cf = &cache->features;
- unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+ unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
DMEMIT("%u ", count);
@@ -3023,10 +3054,10 @@ static void emit_flags(struct cache *cache, char *result,
* <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r = 0;
- unsigned i;
+ unsigned int i;
ssize_t sz = 0;
dm_block_t nr_free_blocks_metadata = 0;
dm_block_t nr_blocks_metadata = 0;
@@ -3063,18 +3094,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
- (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
+ (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
(unsigned long long)cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
- (unsigned) atomic_read(&cache->stats.read_hit),
- (unsigned) atomic_read(&cache->stats.read_miss),
- (unsigned) atomic_read(&cache->stats.write_hit),
- (unsigned) atomic_read(&cache->stats.write_miss),
- (unsigned) atomic_read(&cache->stats.demotion),
- (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned int) atomic_read(&cache->stats.read_hit),
+ (unsigned int) atomic_read(&cache->stats.read_miss),
+ (unsigned int) atomic_read(&cache->stats.write_hit),
+ (unsigned int) atomic_read(&cache->stats.write_miss),
+ (unsigned int) atomic_read(&cache->stats.demotion),
+ (unsigned int) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
emit_flags(cache, result, maxlen, &sz);
@@ -3253,11 +3284,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
return r;
}
-static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
const char **cblock_ranges)
{
int r = 0;
- unsigned i;
+ unsigned int i;
struct cblock_range range;
if (!passthrough_mode(cache)) {
@@ -3294,8 +3325,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
*
* The key migration_threshold is supported by the cache target core.
*/
-static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct cache *cache = ti->private;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 29e0b85eeaf0..f38a27604c7a 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -580,7 +580,7 @@ static int hash_table_init(struct clone *clone)
sz = 1 << HASH_TABLE_BITS;
- clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
+ clone->ht = kvmalloc_array(sz, sizeof(struct hash_table_bucket), GFP_KERNEL);
if (!clone->ht)
return -ENOMEM;
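
kvmalloc_array() is the better fit here because it checks the count-times-size multiplication for overflow (returning NULL instead of silently wrapping) and, like plain kvmalloc(), falls back to vmalloc when a large contiguous allocation fails. A minimal sketch of the idiom, with hypothetical names:

	struct bucket *tbl;

	tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL); /* overflow-checked nr * size */
	if (!tbl)
		return -ENOMEM;
	...
	kvfree(tbl);	/* kvfree() handles both kmalloc- and vmalloc-backed memory */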
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6c6bd24774f2..aecab0c0720f 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Internal header file _only_ for device mapper core
*
@@ -119,7 +120,7 @@ struct mapped_device {
struct dm_stats stats;
/* the number of internal suspends */
- unsigned internal_suspend_count;
+ unsigned int internal_suspend_count;
int swap_bios;
struct semaphore swap_bios_semaphore;
@@ -216,7 +217,7 @@ struct dm_table {
struct list_head devices;
/* events get handed up using this callback */
- void (*event_fn)(void *);
+ void (*event_fn)(void *data);
void *event_context;
struct dm_md_mempools *mempools;
@@ -326,9 +327,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
-unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
+unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
return !maxlen || strlen(result) + 1 >= maxlen;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3aeeb8f2802f..40cb1719ae4d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
@@ -171,14 +172,14 @@ struct crypt_config {
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
- unsigned short int sector_size;
+ unsigned short sector_size;
unsigned char sector_shift;
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
} cipher_tfm;
- unsigned tfms_count;
+ unsigned int tfms_count;
unsigned long cipher_flags;
/*
@@ -212,7 +213,7 @@ struct crypt_config {
* pool for per bio private data, crypto requests,
* encryption requests/buffer pages and integrity tags
*/
- unsigned tag_pool_max_sectors;
+ unsigned int tag_pool_max_sectors;
mempool_t tag_pool;
mempool_t req_pool;
mempool_t page_pool;
@@ -229,7 +230,7 @@ struct crypt_config {
#define POOL_ENTRY_SIZE 512
static DEFINE_SPINLOCK(dm_crypt_clients_lock);
-static unsigned dm_crypt_clients_n = 0;
+static unsigned int dm_crypt_clients_n;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
@@ -354,7 +355,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs;
+ unsigned int bs;
int log;
if (crypt_integrity_aead(cc))
@@ -363,9 +364,10 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
bs = crypto_skcipher_blocksize(any_tfm(cc));
log = ilog2(bs);
- /* we need to calculate how far we must shift the sector count
- * to get the cipher block count, we use this shift in _gen */
-
+ /*
+ * We need to calculate how far we must shift the sector count
+ * to get the cipher block count, we use this shift in _gen.
+ */
if (1 << log != bs) {
ti->error = "cypher blocksize is not a power of 2";
return -EINVAL;
@@ -531,9 +533,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
- src = kmap_atomic(sg_page(sg));
+ src = kmap_local_page(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
- kunmap_atomic(src);
+ kunmap_local(src);
} else
memset(iv, 0, cc->iv_size);
@@ -551,14 +553,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
return 0;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- dst = kmap_atomic(sg_page(sg));
+ dst = kmap_local_page(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */
if (!r)
crypto_xor(dst + sg->offset, iv, cc->iv_size);
- kunmap_atomic(dst);
+ kunmap_local(dst);
return r;
}
@@ -681,9 +683,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
- src = kmap_atomic(sg_page(sg));
+ src = kmap_local_page(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
- kunmap_atomic(src);
+ kunmap_local(src);
}
/* Calculate IV */
@@ -707,9 +709,9 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- dst = kmap_atomic(sg_page(sg));
+ dst = kmap_local_page(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
- kunmap_atomic(dst);
+ kunmap_local(dst);
return r;
}
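
These kmap_atomic() to kmap_local_page() conversions (continued in the Elephant code below) swap an atomic mapping, which disables preemption and page faults, for a purely thread-local one that leaves the task preemptible; nested mappings must still be released in reverse order of acquisition. The pattern, sketched with a hypothetical helper:

	u8 *virt = kmap_local_page(sg_page(sg));	/* thread-local, preemptible */

	process(virt + sg->offset);			/* 'process' is hypothetical */
	kunmap_local(virt);				/* release in reverse order */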
@@ -731,8 +733,7 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
- ti->error = "Block size of EBOIV cipher does "
- "not match IV size of block cipher";
+ ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
return -EINVAL;
}
@@ -974,35 +975,35 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
goto out;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- data = kmap_atomic(sg_page(sg));
+ data = kmap_local_page(sg_page(sg));
data_offset = data + sg->offset;
/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
- data2 = kmap_atomic(sg_page(sg2));
+ data2 = kmap_local_page(sg_page(sg2));
memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
- kunmap_atomic(data2);
+ kunmap_local(data2);
}
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
- diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
}
for (i = 0; i < (cc->sector_size / 32); i++)
crypto_xor(data_offset + i * 32, ks, 32);
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
- diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
}
- kunmap_atomic(data);
+ kunmap_local(data);
out:
kfree_sensitive(ks);
kfree_sensitive(es);
@@ -1255,6 +1256,7 @@ static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
+
return (__le64 *) ptr;
}
@@ -1263,7 +1265,8 @@ static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
cc->iv_size + sizeof(uint64_t);
- return (unsigned int*)ptr;
+
+ return (unsigned int *)ptr;
}
static void *tag_from_dmreq(struct crypt_config *cc,
@@ -1463,7 +1466,7 @@ static void kcryptd_async_done(void *async_req, int error);
static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
- unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
+ unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
@@ -1657,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
* non-blocking allocations without a mutex first but on failure we fall back
* to blocking allocations with a mutex.
*/
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
- unsigned i, len, remaining_size;
+ unsigned int i, len, remaining_size;
struct page *page;
retry:
@@ -1738,6 +1741,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
static void kcryptd_io_bio_endio(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
bio_endio(io->base_bio);
}
@@ -1803,7 +1807,7 @@ static void crypt_endio(struct bio *clone)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
- unsigned rw = bio_data_dir(clone);
+ unsigned int rw = bio_data_dir(clone);
blk_status_t error;
/*
@@ -2255,7 +2259,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc)
static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
- unsigned i;
+ unsigned int i;
if (!cc->cipher_tfm.tfms)
return;
@@ -2280,7 +2284,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
- unsigned i;
+ unsigned int i;
int err;
cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
@@ -2338,12 +2342,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
-static unsigned crypt_subkey_size(struct crypt_config *cc)
+static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}
-static unsigned crypt_authenckey_size(struct crypt_config *cc)
+static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
@@ -2354,7 +2358,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
* This function converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
- unsigned enckeylen, unsigned authkeylen)
+ unsigned int enckeylen, unsigned int authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
@@ -2372,7 +2376,7 @@ static void crypt_copy_authenckey(char *p, const void *key,
static int crypt_setkey(struct crypt_config *cc)
{
- unsigned subkey_size;
+ unsigned int subkey_size;
int err = 0, i, r;
/* Ignore extra keys (which are used for IV etc) */
@@ -2485,7 +2489,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
}
/* look for next ':' separating key_type from key_description */
- key_desc = strpbrk(key_string, ":");
+ key_desc = strchr(key_string, ':');
if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
return -EINVAL;
@@ -2500,7 +2504,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
type = &key_type_encrypted;
set_key = set_key_encrypted;
} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
- !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+ !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
type = &key_type_trusted;
set_key = set_key_trusted;
} else {
@@ -3411,11 +3415,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) {
- unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+ unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+
+ if (unlikely(tag_len > KMALLOC_MAX_SIZE))
+ io->integrity_metadata = NULL;
+ else
+ io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
- unlikely(!(io->integrity_metadata = kmalloc(tag_len,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+ if (unlikely(!io->integrity_metadata)) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
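
The rewritten allocation unrolls the old compound condition into three explicit steps with no change in behaviour:

	/*
	 *   tag_len > KMALLOC_MAX_SIZE      -> skip kmalloc() entirely
	 *   else                            -> opportunistic kmalloc(GFP_NOIO |
	 *                                      __GFP_NORETRY | ...), may fail
	 *   io->integrity_metadata == NULL  -> trim the bio to
	 *                                      tag_pool_max_sectors and take a
	 *                                      guaranteed buffer from the mempool
	 */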
@@ -3439,14 +3446,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
static char hex2asc(unsigned char c)
{
- return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+ return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}
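
Only the cast changes here, but hex2asc() packs a neat branchless trick: for c in 0..9, (unsigned int)(9 - c) stays small, the shift-and-mask yields 0, and the result is '0' + c; for c in 10..15 the subtraction wraps to a huge value, the mask yields 0x27, and since 'a' - '0' - 10 == 0x27 the result lands in 'a'..'f'. Worked instances:

	/* c = 5:  5  + 0x30 + 0x00 = 0x35 = '5' */
	/* c = 12: 12 + 0x30 + 0x27 = 0x63 = 'c' */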
static void crypt_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
- unsigned i, sz = 0;
+ unsigned int i, sz = 0;
int num_feature_args = 0;
switch (type) {
@@ -3562,8 +3569,8 @@ static void crypt_resume(struct dm_target *ti)
* key set <key>
* key wipe
*/
-static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
int key_size, ret = -EINVAL;
@@ -3624,10 +3631,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned int, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
- max_t(unsigned, limits->physical_block_size, cc->sector_size);
- limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ max_t(unsigned int, limits->physical_block_size, cc->sector_size);
+ limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 869afef5654a..a425046f88c7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005-2007 Red Hat GmbH
*
@@ -20,8 +21,8 @@
struct delay_class {
struct dm_dev *dev;
sector_t start;
- unsigned delay;
- unsigned ops;
+ unsigned int delay;
+ unsigned int ops;
};
struct delay_c {
@@ -305,7 +306,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
static void delay_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 03672204b0e3..7ae9936752de 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018 Red Hat, Inc.
*
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 512cc6cea095..b1068a68bc46 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Red Hat GmbH
*
@@ -241,7 +242,7 @@ static void __ebs_process_bios(struct work_struct *ws)
* <offset>: offset in 512 bytes sectors into <dev_path>
* <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
* [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
- * optional, if not supplied, retrieve logical block size from underlying device
+ * optional, if not supplied, retrieve logical block size from underlying device
*/
static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -390,7 +391,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
}
static void ebs_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct ebs_c *ec = ti->private;
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index e92c1afc3677..c2e7780cdd2d 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -22,9 +22,11 @@
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Writeset
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct writeset_metadata {
uint32_t nr_bits;
dm_block_t root;
@@ -51,7 +53,7 @@ static void writeset_free(struct writeset *ws)
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
- unsigned nr_bits, dm_block_t *root)
+ unsigned int nr_bits, dm_block_t *root)
{
int r;
@@ -62,7 +64,7 @@ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}
-static size_t bitset_size(unsigned nr_bits)
+static size_t bitset_size(unsigned int nr_bits)
{
return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
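
For scale: on a 64-bit kernel (BITS_PER_LONG == 64), bitset_size(100) is sizeof(unsigned long) * dm_div_up(100, 64) = 8 * 2 = 16 bytes.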
@@ -110,13 +112,14 @@ static int writeset_marked_on_disk(struct dm_disk_bitset *info,
struct writeset_metadata *m, dm_block_t block,
bool *result)
{
+ int r;
dm_block_t old = m->root;
/*
* The bitset was flushed when it was archived, so we know there'll
* be no change to the root.
*/
- int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
+ r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
if (r) {
DMERR("%s: dm_bitset_test_bit failed", __func__);
return r;
@@ -148,9 +151,11 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* On disk metadata layout
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16
@@ -186,9 +191,11 @@ struct superblock_disk {
__le64 metadata_snap;
} __packed;
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Superblock validation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void sb_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
@@ -204,6 +211,7 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
static int check_metadata_version(struct superblock_disk *disk)
{
uint32_t metadata_version = le32_to_cpu(disk->version);
+
if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
@@ -221,15 +229,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: wanted %llu",
- le64_to_cpu(disk->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: wanted %llu",
- le64_to_cpu(disk->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk->magic),
(unsigned long long) SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -238,8 +246,8 @@ static int sb_check(struct dm_block_validator *v,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
return -EILSEQ;
}
@@ -252,9 +260,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Low level metadata handling
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5
@@ -323,10 +333,10 @@ static int superblock_lock(struct era_metadata *md,
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -363,12 +373,12 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
core->root = le64_to_cpu(disk->root);
}
-static void ws_inc(void *context, const void *value, unsigned count)
+static void ws_inc(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@@ -377,12 +387,12 @@ static void ws_inc(void *context, const void *value, unsigned count)
}
}
-static void ws_dec(void *context, const void *value, unsigned count)
+static void ws_dec(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@@ -401,6 +411,7 @@ static int ws_eq(void *context, const void *value1, const void *value2)
static void setup_writeset_tree_info(struct era_metadata *md)
{
struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
+
md->writeset_tree_info.tm = md->tm;
md->writeset_tree_info.levels = 1;
vt->context = md;
@@ -411,9 +422,9 @@ static void setup_writeset_tree_info(struct era_metadata *md)
}
static void setup_era_array_info(struct era_metadata *md)
-
{
struct dm_btree_value_type vt;
+
vt.context = NULL;
vt.size = sizeof(__le32);
vt.inc = NULL;
@@ -658,21 +669,23 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
synchronize_rcu();
}
-/*----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------------
* Writesets get 'digested' into the main era array.
*
* We're using a coroutine here so the worker thread can do the digestion,
* thus avoiding synchronisation of the metadata. Digesting a whole
* writeset in one go would cause too much latency.
- *--------------------------------------------------------------*/
+ *------------------------------------------------------------------------
+ */
struct digest {
uint32_t era;
- unsigned nr_bits, current_bit;
+ unsigned int nr_bits, current_bit;
struct writeset_metadata writeset;
__le32 value;
struct dm_disk_bitset info;
- int (*step)(struct era_metadata *, struct digest *);
+ int (*step)(struct era_metadata *md, struct digest *d);
};
static int metadata_digest_lookup_writeset(struct era_metadata *md,
@@ -702,7 +715,7 @@ static int metadata_digest_transcribe_writeset(struct era_metadata *md,
{
int r;
bool marked;
- unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+ unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
for (b = d->current_bit; b < e; b++) {
r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
@@ -784,10 +797,12 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
return 0;
}
-/*----------------------------------------------------------------
- * High level metadata interface. Target methods should use these, and not
- * the lower level ones.
- *--------------------------------------------------------------*/
+/*
+ *-----------------------------------------------------------------
+ * High level metadata interface. Target methods should use these,
+ * and not the lower level ones.
+ *-----------------------------------------------------------------
+ */
static struct era_metadata *metadata_open(struct block_device *bdev,
sector_t block_size,
bool may_format)
@@ -1181,17 +1196,19 @@ struct era {
struct rpc {
struct list_head list;
- int (*fn0)(struct era_metadata *);
- int (*fn1)(struct era_metadata *, void *);
+ int (*fn0)(struct era_metadata *md);
+ int (*fn1)(struct era_metadata *md, void *ref);
void *arg;
int result;
struct completion complete;
};
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Remapping.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static bool block_size_is_power_of_two(struct era *era)
{
return era->sectors_per_block_shift >= 0;
@@ -1214,9 +1231,11 @@ static void remap_to_origin(struct era *era, struct bio *bio)
bio_set_dev(bio, era->origin_dev->bdev);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Worker thread
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void wake_worker(struct era *era)
{
if (!atomic_read(&era->suspended))
@@ -1372,9 +1391,10 @@ static int perform_rpc(struct era *era, struct rpc *rpc)
return rpc->result;
}
-static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
+static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
{
struct rpc rpc;
+
rpc.fn0 = fn;
rpc.fn1 = NULL;
@@ -1382,9 +1402,10 @@ static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
}
static int in_worker1(struct era *era,
- int (*fn)(struct era_metadata *, void *), void *arg)
+ int (*fn)(struct era_metadata *md, void *ref), void *arg)
{
struct rpc rpc;
+
rpc.fn0 = NULL;
rpc.fn1 = fn;
rpc.arg = arg;
@@ -1403,9 +1424,11 @@ static void stop_worker(struct era *era)
drain_workqueue(era->wq);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void era_destroy(struct era *era)
{
if (era->md)
@@ -1439,7 +1462,7 @@ static bool valid_block_size(dm_block_t block_size)
/*
* <metadata dev> <data dev> <data block size (sectors)>
*/
-static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
char dummy;
@@ -1618,7 +1641,7 @@ static int era_preresume(struct dm_target *ti)
* <current era> <held metadata root | '-'>
*/
static void era_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
struct era *era = ti->private;
@@ -1633,10 +1656,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
goto err;
DMEMIT("%u %llu/%llu %u",
- (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ (unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long) stats.used,
(unsigned long long) stats.total,
- (unsigned) stats.era);
+ (unsigned int) stats.era);
if (stats.snap != SUPERBLOCK_LOCATION)
DMEMIT(" %llu", stats.snap);
@@ -1662,8 +1685,8 @@ err:
DMEMIT("Error");
}
-static int era_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct era *era = ti->private;
@@ -1694,6 +1717,7 @@ static int era_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct era *era = ti->private;
+
return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3997f34cfebc..c3799757bf4a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -142,7 +143,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
static int set_chunk_size(struct dm_exception_store *store,
const char *chunk_size_arg, char **error)
{
- unsigned chunk_size;
+ unsigned int chunk_size;
if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
*error = "Invalid chunk size";
@@ -158,7 +159,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
- unsigned chunk_size,
+ unsigned int chunk_size,
char **error)
{
/* Check chunk_size is a power of 2 */
@@ -190,7 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
- unsigned *args_used,
+ unsigned int *args_used,
struct dm_exception_store **store)
{
int r = 0;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index b5f20eba3641..b67976637538 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
@@ -43,36 +44,36 @@ struct dm_exception_store_type {
const char *name;
struct module *module;
- int (*ctr) (struct dm_exception_store *store, char *options);
+ int (*ctr)(struct dm_exception_store *store, char *options);
/*
* Destroys this object when you've finished with it.
*/
- void (*dtr) (struct dm_exception_store *store);
+ void (*dtr)(struct dm_exception_store *store);
/*
* The target shouldn't read the COW device until this is
* called. As exceptions are read from the COW, they are
* reported back via the callback.
*/
- int (*read_metadata) (struct dm_exception_store *store,
- int (*callback)(void *callback_context,
- chunk_t old, chunk_t new),
- void *callback_context);
+ int (*read_metadata)(struct dm_exception_store *store,
+ int (*callback)(void *callback_context,
+ chunk_t old, chunk_t new),
+ void *callback_context);
/*
* Find somewhere to store the next exception.
*/
- int (*prepare_exception) (struct dm_exception_store *store,
- struct dm_exception *e);
+ int (*prepare_exception)(struct dm_exception_store *store,
+ struct dm_exception *e);
/*
* Update the metadata with this exception.
*/
- void (*commit_exception) (struct dm_exception_store *store,
- struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
- void *callback_context);
+ void (*commit_exception)(struct dm_exception_store *store,
+ struct dm_exception *e, int valid,
+ void (*callback)(void *, int success),
+ void *callback_context);
/*
* Returns 0 if the exception store is empty.
@@ -82,30 +83,30 @@ struct dm_exception_store_type {
* still-to-be-merged chunk and returns the number of
* consecutive previous ones.
*/
- int (*prepare_merge) (struct dm_exception_store *store,
- chunk_t *last_old_chunk, chunk_t *last_new_chunk);
+ int (*prepare_merge)(struct dm_exception_store *store,
+ chunk_t *last_old_chunk, chunk_t *last_new_chunk);
/*
* Clear the last n exceptions.
* nr_merged must be <= the value returned by prepare_merge.
*/
- int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
+ int (*commit_merge)(struct dm_exception_store *store, int nr_merged);
/*
* The snapshot is invalid, note this in the metadata.
*/
- void (*drop_snapshot) (struct dm_exception_store *store);
+ void (*drop_snapshot)(struct dm_exception_store *store);
- unsigned (*status) (struct dm_exception_store *store,
- status_type_t status, char *result,
- unsigned maxlen);
+ unsigned int (*status)(struct dm_exception_store *store,
+ status_type_t status, char *result,
+ unsigned int maxlen);
/*
* Return how full the snapshot is.
*/
- void (*usage) (struct dm_exception_store *store,
- sector_t *total_sectors, sector_t *sectors_allocated,
- sector_t *metadata_sectors);
+ void (*usage)(struct dm_exception_store *store,
+ sector_t *total_sectors, sector_t *sectors_allocated,
+ sector_t *metadata_sectors);
/* For internal device-mapper use only. */
struct list_head list;
@@ -118,9 +119,9 @@ struct dm_exception_store {
struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
- unsigned chunk_size;
- unsigned chunk_mask;
- unsigned chunk_shift;
+ unsigned int chunk_size;
+ unsigned int chunk_mask;
+ unsigned int chunk_shift;
void *context;
@@ -144,7 +145,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
+static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
@@ -181,12 +182,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
- unsigned chunk_size,
+ unsigned int chunk_size,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
- unsigned *args_used,
+ unsigned int *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89fa7a68c6c4..5b7556d2a9d9 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
* Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
@@ -26,12 +27,12 @@ struct flakey_c {
struct dm_dev *dev;
unsigned long start_time;
sector_t start;
- unsigned up_interval;
- unsigned down_interval;
+ unsigned int up_interval;
+ unsigned int down_interval;
unsigned long flags;
- unsigned corrupt_bio_byte;
- unsigned corrupt_bio_rw;
- unsigned corrupt_bio_value;
+ unsigned int corrupt_bio_byte;
+ unsigned int corrupt_bio_rw;
+ unsigned int corrupt_bio_value;
blk_opf_t corrupt_bio_flags;
};
@@ -48,7 +49,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -148,7 +149,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
sizeof(unsigned int));
r = dm_read_arg(_args + 3, as,
- (__force unsigned *)&fc->corrupt_bio_flags,
+ (__force unsigned int *)&fc->corrupt_bio_flags,
&ti->error);
if (r)
return r;
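
A note on the `__force` cast above: blk_opf_t is a sparse `__bitwise` type (an unsigned int underneath), while dm_read_arg() fills a plain unsigned int, so the reinterpretation has to be annotated to keep sparse quiet; the BUILD_BUG_ON just before it guards the size assumption. The same pattern written with a hypothetical local instead of an in-place cast:

	unsigned int raw;

	r = dm_read_arg(_args + 3, as, &raw, &ti->error);	/* plain integer in */
	if (!r)
		fc->corrupt_bio_flags = (__force blk_opf_t)raw;	/* intentional reinterpret */
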
@@ -303,9 +304,13 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
*/
bio_for_each_segment(bvec, bio, iter) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
- char *segment = (page_address(bio_iter_page(bio, iter))
- + bio_iter_offset(bio, iter));
+ char *segment;
+ struct page *page = bio_iter_page(bio, iter);
+ if (unlikely(page == ZERO_PAGE(0)))
+ break;
+ segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = fc->corrupt_bio_value;
+ kunmap_local(segment);
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
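
Two fixes land in the corrupt_bio_data() hunk above. First, page_address() is only valid for lowmem pages, so on 32-bit highmem configurations the old code could compute a bogus pointer; bvec_kmap_local() maps the segment (offset included) regardless of where the page lives. Second, a write bio whose payload is the shared ZERO_PAGE must never be scribbled on, hence the early break. A minimal sketch of the safe pattern, using a hypothetical helper name:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Write one byte into the first bio segment large enough to hold it. */
static void poke_byte_in_bio(struct bio *bio, unsigned int offset, u8 value)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > offset) {
			char *p;

			/* never corrupt the shared zero page */
			if (bio_iter_page(bio, iter) == ZERO_PAGE(0))
				break;
			p = bvec_kmap_local(&bvec);	/* highmem-safe mapping */
			p[offset] = value;
			kunmap_local(p);
			break;
		}
	}
}
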
@@ -320,8 +325,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
- unsigned elapsed;
+ unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
pb->bio_submitted = false;
if (op_is_zone_mgmt(bio_op(bio)))
@@ -352,8 +358,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
- }
- else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -361,9 +366,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
/*
* Corrupt matching writes.
*/
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
- if (all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (fc->corrupt_bio_byte) {
+ if (fc->corrupt_bio_rw == WRITE) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ }
goto map_bio;
}
@@ -389,13 +396,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc)) {
- /*
- * Corrupt successful matching READs while in down state.
- */
- corrupt_bio_data(bio, fc);
-
+ if (fc->corrupt_bio_byte) {
+ if ((fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc)) {
+ /*
+ * Corrupt successful matching READs while in down state.
+ */
+ corrupt_bio_data(bio, fc);
+ }
} else if (!test_bit(DROP_WRITES, &fc->flags) &&
!test_bit(ERROR_WRITES, &fc->flags)) {
/*
@@ -410,11 +418,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
}
static void flakey_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes, error_writes;
+ unsigned int drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
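
The restructuring in flakey_map() and flakey_end_io() above is a behavioral fix, not just style. Previously, when corrupt_bio_byte was configured but the inner condition (direction plus all_corrupt_bio_flags_match()) failed, the bio fell through to the catch-all error handling: flakey_map() could error writes that should have been passed through, and flakey_end_io() could error reads that should have returned intact data. Now `corrupt_bio_byte != 0` alone selects the corruption path, and a non-matching bio passes through unharmed. A hypothetical userspace model of the corrected read-completion policy:

enum action { PASS_THROUGH, CORRUPT, ERROR_OUT };

static enum action down_read_policy(bool corrupt_configured,
				    bool dir_and_flags_match,
				    bool drop_or_error_writes)
{
	if (corrupt_configured)	/* corruption configured: never error instead */
		return dir_and_flags_match ? CORRUPT : PASS_THROUGH;
	if (!drop_or_error_writes)
		return ERROR_OUT;	/* reads in the down interval fail by default */
	return PASS_THROUGH;
}
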
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index a1bd7cd52b1b..b90f34259fbb 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -1,11 +1,10 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
- * File: dm-ima.c
- * Enables IMA measurements for DM targets
+ * Enables IMA measurements for DM targets
*/
#include "dm-core.h"
diff --git a/drivers/md/dm-ima.h b/drivers/md/dm-ima.h
index b8c3b614670b..568870a1a145 100644
--- a/drivers/md/dm-ima.h
+++ b/drivers/md/dm-ima.h
@@ -1,11 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
- * File: dm-ima.h
- * Header file for device mapper IMA measurements.
+ * Header file for device mapper IMA measurements.
*/
#ifndef DM_IMA_H
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index dc4381d68313..d369457dbed0 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * dm-init.c
* Copyright (C) 2017 The Chromium OS Authors <chromium-os-dev@chromium.org>
*
* This file is released under the GPLv2.
@@ -296,7 +295,7 @@ static int __init dm_init_init(void)
if (waitfor[i]) {
DMINFO("waiting for device %s ...", waitfor[i]);
while (!dm_get_dev_t(waitfor[i]))
- msleep(5);
+ fsleep(5000);
}
}
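
msleep(5) is a poor fit for short waits: with a coarse HZ it can sleep far longer than requested. fsleep() takes microseconds and picks a mechanism appropriate to the duration. Roughly (a sketch of the logic in include/linux/delay.h, not a verbatim copy):

static inline void fsleep_sketch(unsigned long usecs)
{
	if (usecs <= 10)
		udelay(usecs);			/* busy-wait for very short delays */
	else if (usecs <= 20000)
		usleep_range(usecs, 2 * usecs);	/* hrtimer-based sleep */
	else
		msleep(DIV_ROUND_UP(usecs, 1000));
}
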
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c58156deb2b1..b0d5057fbdd9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2016-2017 Milan Broz
@@ -112,9 +113,9 @@ struct journal_entry {
#endif
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
-#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
+#define journal_entry_set_unused(je) ((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
-#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
+#define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2))
#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
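
checkpatch flags `do { } while (0)` around a single expression: the wrapper exists so that multi-statement macro bodies behave as one statement after an unbraced `if`, and a lone parenthesized assignment needs no such help. Illustration:

/* Multi-statement body: the do/while (0) wrapper is required, otherwise
 * `if (x) SWAP(a, b); else ...` would not parse as intended. */
#define SWAP(a, b) do { typeof(a) __t = (a); (a) = (b); (b) = __t; } while (0)

/* Single expression: parentheses alone suffice, and the macro can even
 * be used where an expression is expected. */
#define MARK_UNUSED(x) ((x) = -1)
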
@@ -157,13 +158,13 @@ struct alg_spec {
char *alg_string;
char *key_string;
__u8 *key;
- unsigned key_size;
+ unsigned int key_size;
};
struct dm_integrity_c {
struct dm_dev *dev;
struct dm_dev *meta_dev;
- unsigned tag_size;
+ unsigned int tag_size;
__s8 log2_tag_size;
sector_t start;
mempool_t journal_io_mempool;
@@ -171,8 +172,8 @@ struct dm_integrity_c {
struct dm_bufio_client *bufio;
struct workqueue_struct *metadata_wq;
struct superblock *sb;
- unsigned journal_pages;
- unsigned n_bitmap_blocks;
+ unsigned int journal_pages;
+ unsigned int n_bitmap_blocks;
struct page_list *journal;
struct page_list *journal_io;
@@ -180,7 +181,7 @@ struct dm_integrity_c {
struct page_list *recalc_bitmap;
struct page_list *may_write_bitmap;
struct bitmap_block_status *bbs;
- unsigned bitmap_flush_interval;
+ unsigned int bitmap_flush_interval;
int synchronous_mode;
struct bio_list synchronous_bios;
struct delayed_work bitmap_flush_work;
@@ -201,12 +202,12 @@ struct dm_integrity_c {
unsigned char journal_entries_per_sector;
unsigned char journal_section_entries;
unsigned short journal_section_sectors;
- unsigned journal_sections;
- unsigned journal_entries;
+ unsigned int journal_sections;
+ unsigned int journal_entries;
sector_t data_device_sectors;
sector_t meta_device_sectors;
- unsigned initial_sectors;
- unsigned metadata_run;
+ unsigned int initial_sectors;
+ unsigned int metadata_run;
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
@@ -230,17 +231,17 @@ struct dm_integrity_c {
unsigned char commit_seq;
commit_id_t commit_ids[N_COMMIT_IDS];
- unsigned committed_section;
- unsigned n_committed_sections;
+ unsigned int committed_section;
+ unsigned int n_committed_sections;
- unsigned uncommitted_section;
- unsigned n_uncommitted_sections;
+ unsigned int uncommitted_section;
+ unsigned int n_uncommitted_sections;
- unsigned free_section;
+ unsigned int free_section;
unsigned char free_section_entry;
- unsigned free_sectors;
+ unsigned int free_sectors;
- unsigned free_sectors_threshold;
+ unsigned int free_sectors_threshold;
struct workqueue_struct *commit_wq;
struct work_struct commit_work;
@@ -257,7 +258,7 @@ struct dm_integrity_c {
unsigned long autocommit_jiffies;
struct timer_list autocommit_timer;
- unsigned autocommit_msec;
+ unsigned int autocommit_msec;
wait_queue_head_t copy_to_journal_wait;
@@ -305,7 +306,7 @@ struct dm_integrity_io {
struct dm_integrity_range range;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
atomic_t in_flight;
blk_status_t bi_status;
@@ -329,7 +330,7 @@ struct journal_io {
struct bitmap_block_status {
struct work_struct work;
struct dm_integrity_c *ic;
- unsigned idx;
+ unsigned int idx;
unsigned long *bitmap;
struct bio_list bio_queue;
spinlock_t bio_queue_lock;
@@ -345,6 +346,7 @@ static struct kmem_cache *journal_io_cache;
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
va_list args;
+
va_start(args, msg);
vprintk(msg, args);
va_end(args);
@@ -410,8 +412,8 @@ static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
return false;
}
-static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
- unsigned j, unsigned char seq)
+static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
+ unsigned int j, unsigned char seq)
{
/*
* Xor the number with section and sector, so that if a piece of
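
The comment above explains the trick: mixing the section and sector indices into the commit ID means a journal sector written to (or replayed from) the wrong location no longer carries a matching ID, so misdirected writes are caught during replay. A hypothetical illustration of position-dependent tagging (not the driver's actual formula, which continues past this hunk):

static u64 position_tag(u64 base_id, unsigned int section, unsigned int sector)
{
	/* same payload at a different (section, sector) yields a different tag */
	return base_id ^ ((u64)section << 32) ^ sector;
}
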
@@ -426,7 +428,7 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
if (!ic->meta_dev) {
__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
*area = data_sector >> log2_interleave_sectors;
- *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
+ *offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
} else {
*area = 0;
*offset = data_sector;
@@ -435,15 +437,15 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
#define sector_to_block(ic, n) \
do { \
- BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
+ BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \
(n) >>= (ic)->sb->log2_sectors_per_block; \
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
- sector_t offset, unsigned *metadata_offset)
+ sector_t offset, unsigned int *metadata_offset)
{
__u64 ms;
- unsigned mo;
+ unsigned int mo;
ms = area << ic->sb->log2_interleave_sectors;
if (likely(ic->log2_metadata_run >= 0))
@@ -484,7 +486,7 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
return result;
}
-static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
+static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
if (unlikely(*sec_ptr >= ic->journal_sections))
*sec_ptr -= ic->journal_sections;
@@ -508,7 +510,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
- unsigned size = crypto_shash_digestsize(ic->journal_mac);
+ unsigned int size = crypto_shash_digestsize(ic->journal_mac);
if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
@@ -537,6 +539,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
}
} else {
__u8 result[HASH_MAX_DIGESTSIZE];
+
r = crypto_shash_final(desc, result);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -627,11 +630,10 @@ static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
end_bit %= PAGE_SIZE * 8;
repeat:
- if (page < end_page) {
+ if (page < end_page)
this_end_bit = PAGE_SIZE * 8 - 1;
- } else {
+ else
this_end_bit = end_bit;
- }
data = lowmem_page_address(bitmap[page].page);
@@ -678,16 +680,18 @@ repeat:
} else if (mode == BITMAP_OP_CLEAR) {
if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
clear_page(data);
- else while (bit <= this_end_bit) {
- if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
- do {
- data[bit / BITS_PER_LONG] = 0;
- bit += BITS_PER_LONG;
- } while (this_end_bit >= bit + BITS_PER_LONG - 1);
- continue;
+ else {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = 0;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __clear_bit(bit, data);
+ bit++;
}
- __clear_bit(bit, data);
- bit++;
}
} else {
BUG();
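
The BITMAP_OP_CLEAR rewrite above only adds braces around the `else` arm (checkpatch dislikes `else while`), but the loop itself is worth reading: aligned full words are zeroed directly, and only the ragged edges fall back to per-bit clearing. The same loop as a standalone, userspace-style sketch (the kernel provides BITS_PER_LONG and __clear_bit()):

#include <limits.h>
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

static void clear_bit_range(unsigned long *data, unsigned int bit, unsigned int end_bit)
{
	while (bit <= end_bit) {
		if (!(bit % BITS_PER_LONG) && end_bit >= bit + BITS_PER_LONG - 1) {
			do {	/* word-at-a-time fast path */
				data[bit / BITS_PER_LONG] = 0;
				bit += BITS_PER_LONG;
			} while (end_bit >= bit + BITS_PER_LONG - 1);
			continue;
		}
		data[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
		bit++;
	}
}
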
@@ -704,30 +708,31 @@ repeat:
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
- unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
- unsigned i;
+ unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned int i;
for (i = 0; i < n_bitmap_pages; i++) {
unsigned long *dst_data = lowmem_page_address(dst[i].page);
unsigned long *src_data = lowmem_page_address(src[i].page);
+
copy_page(dst_data, src_data);
}
}
static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
- unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
- unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+ unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
return &ic->bbs[bitmap_block];
}
-static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
- unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
+ unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
@@ -738,10 +743,10 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
#endif
}
-static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
- unsigned *pl_index, unsigned *pl_offset)
+static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ unsigned int *pl_index, unsigned int *pl_offset)
{
- unsigned sector;
+ unsigned int sector;
access_journal_check(ic, section, offset, false, "page_list_location");
@@ -752,9 +757,9 @@ static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsi
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
- unsigned section, unsigned offset, unsigned *n_sectors)
+ unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
- unsigned pl_index, pl_offset;
+ unsigned int pl_index, pl_offset;
char *va;
page_list_location(ic, section, offset, &pl_index, &pl_offset);
@@ -767,14 +772,14 @@ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct
return (struct journal_sector *)(va + pl_offset);
}
-static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
+static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
return access_page_list(ic, ic->journal, section, offset, NULL);
}
-static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
+static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
- unsigned rel_sector, offset;
+ unsigned int rel_sector, offset;
struct journal_sector *js;
access_journal_check(ic, section, n, true, "access_journal_entry");
@@ -786,7 +791,7 @@ static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, uns
return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
-static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
+static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
n <<= ic->sb->log2_sectors_per_block;
@@ -797,11 +802,11 @@ static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, uns
return access_journal(ic, section, n);
}
-static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
+static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
- unsigned j, size;
+ unsigned int j, size;
desc->tfm = ic->journal_mac;
@@ -821,7 +826,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
section_le = cpu_to_le64(section);
- r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
+ r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto err;
@@ -830,7 +835,8 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, section, j);
- r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
+
+ r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto err;
@@ -866,10 +872,10 @@ err:
memset(result, 0, JOURNAL_MAC_SIZE);
}
-static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
+static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
__u8 result[JOURNAL_MAC_SIZE];
- unsigned j;
+ unsigned int j;
if (!ic->journal_mac)
return;
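
section_mac(), whose signature changes above, drives the synchronous-hash API: allocate a descriptor on the stack, init, feed the section number and each entry's sector, then finalize. The core pattern in isolation, as a sketch:

#include <crypto/hash.h>

static int mac_buffer(struct crypto_shash *tfm, const void *data,
		      unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int r;

	desc->tfm = tfm;
	r = crypto_shash_init(desc);
	if (r < 0)
		return r;
	r = crypto_shash_update(desc, data, len);	/* may be called repeatedly */
	if (r < 0)
		return r;
	return crypto_shash_final(desc, out);	/* out needs crypto_shash_digestsize() bytes */
}

For a single contiguous buffer, crypto_shash_digest() collapses init/update/final into one call; the driver needs the incremental form because it hashes several discontiguous fields per section.
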
@@ -893,17 +899,18 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
static void complete_journal_op(void *context)
{
struct journal_completion *comp = context;
+
BUG_ON(!atomic_read(&comp->in_flight));
if (likely(atomic_dec_and_test(&comp->in_flight)))
complete(&comp->comp);
}
-static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
struct async_submit_ctl submit;
size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
- unsigned pl_index, pl_offset, section_index;
+ unsigned int pl_index, pl_offset, section_index;
struct page_list *source_pl, *target_pl;
if (likely(encrypt)) {
@@ -928,7 +935,8 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
struct page *dst_page;
while (unlikely(pl_index == section_index)) {
- unsigned dummy;
+ unsigned int dummy;
+
if (likely(encrypt))
rw_section_mac(ic, section, true);
section++;
@@ -958,6 +966,7 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
static void complete_journal_encrypt(void *data, int err)
{
struct journal_completion *comp = data;
+
if (unlikely(err)) {
if (likely(err == -EINPROGRESS)) {
complete(&comp->ic->crypto_backoff);
@@ -971,6 +980,7 @@ static void complete_journal_encrypt(void *data, int err)
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
int r;
+
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
complete_journal_encrypt, comp);
if (likely(encrypt))
@@ -990,8 +1000,8 @@ static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_
return false;
}
-static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
struct scatterlist **source_sg;
struct scatterlist **target_sg;
@@ -1008,7 +1018,7 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
do {
struct skcipher_request *req;
- unsigned ivsize;
+ unsigned int ivsize;
char *iv;
if (likely(encrypt))
@@ -1034,8 +1044,8 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
complete_journal_op(comp);
}
-static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
if (ic->journal_xor)
return xor_journal(ic, encrypt, section, n_sections, comp);
@@ -1046,18 +1056,19 @@ static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned se
static void complete_journal_io(unsigned long error, void *context)
{
struct journal_completion *comp = context;
+
if (unlikely(error != 0))
dm_integrity_io_error(comp->ic, "writing journal", -EIO);
complete_journal_op(comp);
}
static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
- unsigned sector, unsigned n_sectors,
+ unsigned int sector, unsigned int n_sectors,
struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
- unsigned pl_index, pl_offset;
+ unsigned int pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@@ -1099,10 +1110,10 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
}
static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
- unsigned section, unsigned n_sections,
+ unsigned int section, unsigned int n_sections,
struct journal_completion *comp)
{
- unsigned sector, n_sectors;
+ unsigned int sector, n_sectors;
sector = section * ic->journal_section_sectors;
n_sectors = n_sections * ic->journal_section_sectors;
@@ -1110,12 +1121,12 @@ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}
-static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
+static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
struct journal_completion io_comp;
struct journal_completion crypt_comp_1;
struct journal_completion crypt_comp_2;
- unsigned i;
+ unsigned int i;
io_comp.ic = ic;
init_completion(&io_comp.comp);
@@ -1135,7 +1146,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else {
- unsigned to_end;
+ unsigned int to_end;
+
io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
to_end = ic->journal_sections - commit_start;
if (ic->journal_io) {
@@ -1172,15 +1184,15 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
wait_for_completion_io(&io_comp.comp);
}
-static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
- unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
+static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
int r;
- unsigned sector, pl_index, pl_offset;
+ unsigned int sector, pl_index, pl_offset;
- BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
+ BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
if (unlikely(dm_integrity_failed(ic))) {
fn(-1UL, data);
@@ -1221,10 +1233,11 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
struct rb_node **n = &ic->in_progress.rb_node;
struct rb_node *parent;
- BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
+ BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
if (likely(check_waiting)) {
struct dm_integrity_range *range;
+
list_for_each_entry(range, &ic->wait_list, wait_entry) {
if (unlikely(ranges_overlap(range, new_range)))
return false;
@@ -1237,13 +1250,12 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
parent = *n;
- if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
+ if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
n = &range->node.rb_left;
- } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
+ else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
n = &range->node.rb_right;
- } else {
+ else
return false;
- }
}
rb_link_node(&new_range->node, parent, n);
@@ -1259,6 +1271,7 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
+
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@@ -1318,6 +1331,7 @@ static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *nod
while (*link) {
struct journal_node *j;
+
parent = *link;
j = container_of(parent, struct journal_node, node);
if (sector < j->sector)
@@ -1339,28 +1353,29 @@ static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *
#define NOT_FOUND (-1U)
-static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
+static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
struct rb_node *n = ic->journal_tree_root.rb_node;
- unsigned found = NOT_FOUND;
+ unsigned int found = NOT_FOUND;
+
*next_sector = (sector_t)-1;
while (n) {
struct journal_node *j = container_of(n, struct journal_node, node);
- if (sector == j->sector) {
+
+ if (sector == j->sector)
found = j - ic->journal_tree;
- }
+
if (sector < j->sector) {
*next_sector = j->sector;
n = j->node.rb_left;
- } else {
+ } else
n = j->node.rb_right;
- }
}
return found;
}
-static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
+static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
struct journal_node *node, *next_node;
struct rb_node *next;
@@ -1385,7 +1400,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
{
struct rb_node *next;
struct journal_node *next_node;
- unsigned next_section;
+ unsigned int next_section;
BUG_ON(RB_EMPTY_NODE(&node->node));
@@ -1398,7 +1413,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
if (next_node->sector != node->sector)
return false;
- next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
+ next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
if (next_section >= ic->committed_section &&
next_section < ic->committed_section + ic->n_committed_sections)
return true;
@@ -1413,17 +1428,17 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
#define TAG_CMP 2
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
- unsigned *metadata_offset, unsigned total_size, int op)
+ unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
- unsigned hash_offset = 0;
- unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ unsigned int hash_offset = 0;
+ unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
do {
unsigned char *data, *dp;
struct dm_buffer *b;
- unsigned to_copy;
+ unsigned int to_copy;
int r;
r = dm_integrity_failed(ic);
@@ -1453,7 +1468,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
goto thorough_test;
}
} else {
- unsigned i, ts;
+ unsigned int i, ts;
thorough_test:
ts = total_size;
@@ -1483,9 +1498,8 @@ thorough_test:
*metadata_offset = 0;
}
- if (unlikely(!is_power_of_2(ic->tag_size))) {
+ if (unlikely(!is_power_of_2(ic->tag_size)))
hash_offset = (hash_offset + to_copy) % ic->tag_size;
- }
total_size -= to_copy;
} while (unlikely(total_size));
@@ -1505,6 +1519,7 @@ struct flush_request {
static void flush_notify(unsigned long error, void *fr_)
{
struct flush_request *fr = fr_;
+
if (unlikely(error != 0))
dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
complete(&fr->comp);
@@ -1513,7 +1528,6 @@ static void flush_notify(unsigned long error, void *fr_)
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
-
struct flush_request fr;
if (!ic->meta_dev)
@@ -1545,6 +1559,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
DECLARE_WAITQUEUE(wait, current);
+
__add_wait_queue(&ic->endio_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&ic->endio_wait.lock);
@@ -1582,11 +1597,14 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
- int r = dm_integrity_failed(ic);
+ int r;
+
+ r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_status)
bio->bi_status = errno_to_blk_status(r);
if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
unsigned long flags;
+
spin_lock_irqsave(&ic->endio_wait.lock, flags);
bio_list_add(&ic->synchronous_bios, bio);
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
@@ -1618,7 +1636,6 @@ static void dec_in_flight(struct dm_integrity_io *dio)
schedule_autocommit(ic);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
-
if (unlikely(dio->bi_status) && !bio->bi_status)
bio->bi_status = dio->bi_status;
if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
@@ -1652,7 +1669,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
__le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
- unsigned digest_size;
+ unsigned int digest_size;
req->tfm = ic->internal_hash;
@@ -1670,7 +1687,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
}
}
- r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
+ r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto failed;
@@ -1709,13 +1726,13 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
- unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
- unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
- unsigned sectors_to_process;
+ unsigned int sectors_to_process;
if (unlikely(ic->mode == 'R'))
goto skip_io;
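
The max() → max_t() change for checksums_onstack[] above reflects that the kernel's min()/max() macros type-check their operands and warn when the types differ, which is why the old code cast one argument; min_t()/max_t() name the common type once and cast both sides. Equivalent spellings:

	size_t n = max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE);
	/* same as: max((size_t)HASH_MAX_DIGESTSIZE, (size_t)MAX_TAG_SIZE) */
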
@@ -1735,14 +1752,15 @@ static void integrity_metadata(struct work_struct *w)
}
if (unlikely(dio->op == REQ_OP_DISCARD)) {
- sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
- unsigned bi_size = dio->bio_details.bi_iter.bi_size;
- unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
- unsigned max_blocks = max_size / ic->tag_size;
+ unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
+ unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+ unsigned int max_blocks = max_size / ic->tag_size;
+
memset(checksums, DISCARD_FILLER, max_size);
while (bi_size) {
- unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+
this_step_blocks = min(this_step_blocks, max_blocks);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
this_step_blocks * ic->tag_size, TAG_WRITE);
@@ -1752,13 +1770,7 @@ static void integrity_metadata(struct work_struct *w)
goto error;
}
- /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
- printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
- printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
- BUG();
- }*/
bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
- bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
}
if (likely(checksums != checksums_onstack))
@@ -1770,7 +1782,7 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
- unsigned pos;
+ unsigned int pos;
char *mem, *checksums_ptr;
again:
@@ -1823,13 +1835,14 @@ again:
if (bip) {
struct bio_vec biv;
struct bvec_iter iter;
- unsigned data_to_process = dio->range.n_sectors;
+ unsigned int data_to_process = dio->range.n_sectors;
+
sector_to_block(ic, data_to_process);
data_to_process *= ic->tag_size;
bip_for_each_vec(biv, bip, iter) {
unsigned char *tag;
- unsigned this_len;
+ unsigned int this_len;
BUG_ON(PageHighMem(biv.bv_page));
tag = bvec_virt(&biv);
@@ -1867,11 +1880,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
- unsigned log2_max_io_len = __fls(ti->max_io_len);
+ unsigned int log2_max_io_len = __fls(ti->max_io_len);
sector_t start_boundary = sec >> log2_max_io_len;
sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+
if (start_boundary < end_boundary) {
sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
+
dm_accept_partial_bio(bio, len);
}
}
@@ -1897,7 +1912,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
ic->provided_data_sectors);
return DM_MAPIO_KILL;
}
- if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
+ if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
ic->sectors_per_block,
dio->range.logical_sector, bio_sectors(bio));
@@ -1907,6 +1922,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
struct bvec_iter iter;
struct bio_vec bv;
+
bio_for_each_segment(bv, bio, iter) {
if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
@@ -1919,7 +1935,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
bip = bio_integrity(bio);
if (!ic->internal_hash) {
if (bip) {
- unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+ unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+
if (ic->log2_tag_size >= 0)
wanted_tag_size <<= ic->log2_tag_size;
else
@@ -1949,11 +1966,11 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
- unsigned journal_section, unsigned journal_entry)
+ unsigned int journal_section, unsigned int journal_entry)
{
struct dm_integrity_c *ic = dio->ic;
sector_t logical_sector;
- unsigned n_sectors;
+ unsigned int n_sectors;
logical_sector = dio->range.logical_sector;
n_sectors = dio->range.n_sectors;
@@ -1976,7 +1993,7 @@ retry_kmap:
if (unlikely(dio->op == REQ_OP_READ)) {
struct journal_sector *js;
char *mem_ptr;
- unsigned s;
+ unsigned int s;
if (unlikely(journal_entry_is_inprogress(je))) {
flush_dcache_page(bv.bv_page);
@@ -1998,7 +2015,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -2013,31 +2030,32 @@ retry_kmap:
if (!ic->internal_hash) {
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned tag_todo = ic->tag_size;
+ unsigned int tag_todo = ic->tag_size;
char *tag_ptr = journal_entry_tag(ic, je);
- if (bip) do {
- struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
- unsigned tag_now = min(biv.bv_len, tag_todo);
- char *tag_addr;
- BUG_ON(PageHighMem(biv.bv_page));
- tag_addr = bvec_virt(&biv);
- if (likely(dio->op == REQ_OP_WRITE))
- memcpy(tag_ptr, tag_addr, tag_now);
- else
- memcpy(tag_addr, tag_ptr, tag_now);
- bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
- tag_ptr += tag_now;
- tag_todo -= tag_now;
- } while (unlikely(tag_todo)); else {
- if (likely(dio->op == REQ_OP_WRITE))
- memset(tag_ptr, 0, tag_todo);
- }
+ if (bip) {
+ do {
+ struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+ unsigned int tag_now = min(biv.bv_len, tag_todo);
+ char *tag_addr;
+
+ BUG_ON(PageHighMem(biv.bv_page));
+ tag_addr = bvec_virt(&biv);
+ if (likely(dio->op == REQ_OP_WRITE))
+ memcpy(tag_ptr, tag_addr, tag_now);
+ else
+ memcpy(tag_addr, tag_ptr, tag_now);
+ bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
+ tag_ptr += tag_now;
+ tag_todo -= tag_now;
+ } while (unlikely(tag_todo));
+ } else if (likely(dio->op == REQ_OP_WRITE))
+ memset(tag_ptr, 0, tag_todo);
}
if (likely(dio->op == REQ_OP_WRITE)) {
struct journal_sector *js;
- unsigned s;
+ unsigned int s;
js = access_journal_data(ic, journal_section, journal_entry);
memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
@@ -2048,9 +2066,11 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
- unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
+
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2080,14 +2100,12 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->commit_wq, &ic->commit_work);
- } else {
+ else
schedule_autocommit(ic);
- }
- } else {
+ } else
remove_range(ic, &dio->range);
- }
if (unlikely(bio->bi_iter.bi_size)) {
sector_t area, offset;
@@ -2105,11 +2123,12 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
{
struct dm_integrity_c *ic = dio->ic;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
- unsigned journal_section, journal_entry;
- unsigned journal_read_pos;
+ unsigned int journal_section, journal_entry;
+ unsigned int journal_read_pos;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+
if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
need_sync_io = true;
@@ -2131,8 +2150,8 @@ retry:
journal_read_pos = NOT_FOUND;
if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
if (dio->op == REQ_OP_WRITE) {
- unsigned next_entry, i, pos;
- unsigned ws, we, range_sectors;
+ unsigned int next_entry, i, pos;
+ unsigned int ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
(sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
@@ -2180,13 +2199,15 @@ retry:
goto journal_read_write;
} else {
sector_t next_sector;
+
journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (likely(journal_read_pos == NOT_FOUND)) {
if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
dio->range.n_sectors = next_sector - dio->range.logical_sector;
} else {
- unsigned i;
- unsigned jp = journal_read_pos + 1;
+ unsigned int i;
+ unsigned int jp = journal_read_pos + 1;
+
for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
break;
@@ -2218,7 +2239,9 @@ offload_to_thread:
*/
if (journal_read_pos != NOT_FOUND) {
sector_t next_sector;
- unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ unsigned int new_pos;
+
+ new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != journal_read_pos)) {
remove_range_unlocked(ic, &dio->range);
goto retry;
@@ -2227,7 +2250,9 @@ offload_to_thread:
}
if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
sector_t next_sector;
- unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ unsigned int new_pos;
+
+ new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != NOT_FOUND) ||
unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
remove_range_unlocked(ic, &dio->range);
@@ -2307,7 +2332,6 @@ offload_to_thread:
else
skip_check:
dec_in_flight(dio);
-
} else {
INIT_WORK(&dio->work, integrity_metadata);
queue_work(ic->metadata_wq, &dio->work);
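
Reads that need checksum verification are bounced to metadata_wq rather than verified in the completion path. The INIT_WORK/container_of idiom used here, reduced to its skeleton (hypothetical names):

#include <linux/workqueue.h>

struct my_io {
	struct work_struct work;
	/* per-request state ... */
};

static void my_worker(struct work_struct *w)
{
	struct my_io *io = container_of(w, struct my_io, work);

	/* heavyweight work (hashing, tag comparison) runs in process context */
}

static void submit_my_io(struct workqueue_struct *wq, struct my_io *io)
{
	INIT_WORK(&io->work, my_worker);
	queue_work(wq, &io->work);
}
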
@@ -2354,8 +2378,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
static void integrity_commit(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
- unsigned commit_start, commit_sections;
- unsigned i, j, n;
+ unsigned int commit_start, commit_sections;
+ unsigned int i, j, n;
struct bio *flushes;
del_timer(&ic->autocommit_timer);
@@ -2382,11 +2406,13 @@ static void integrity_commit(struct work_struct *w)
for (n = 0; n < commit_sections; n++) {
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je;
+
je = access_journal_entry(ic, i, j);
io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
}
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js;
+
js = access_journal(ic, i, j);
js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
}
@@ -2412,6 +2438,7 @@ static void integrity_commit(struct work_struct *w)
release_flush_bios:
while (flushes) {
struct bio *next = flushes->bi_next;
+
flushes->bi_next = NULL;
do_endio(ic, flushes);
flushes = next;
@@ -2423,6 +2450,7 @@ static void complete_copy_from_journal(unsigned long error, void *context)
struct journal_io *io = context;
struct journal_completion *comp = io->comp;
struct dm_integrity_c *ic = comp->ic;
+
remove_range(ic, &io->range);
mempool_free(io, &ic->journal_io_mempool);
if (unlikely(error != 0))
@@ -2433,17 +2461,18 @@ static void complete_copy_from_journal(unsigned long error, void *context)
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
struct journal_entry *je)
{
- unsigned s = 0;
+ unsigned int s = 0;
+
do {
js->commit_id = je->last_bytes[s];
js++;
} while (++s < ic->sectors_per_block);
}
-static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
- unsigned write_sections, bool from_replay)
+static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
+ unsigned int write_sections, bool from_replay)
{
- unsigned i, j, n;
+ unsigned int i, j, n;
struct journal_completion comp;
struct blk_plug plug;
@@ -2462,9 +2491,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
sector_t sec, area, offset;
- unsigned k, l, next_loop;
+ unsigned int k, l, next_loop;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
struct journal_io *io;
if (journal_entry_is_unused(je))
@@ -2472,7 +2501,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
sec = journal_entry_get_sector(je);
if (unlikely(from_replay)) {
- if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
+ if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
@@ -2486,6 +2515,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
for (k = j + 1; k < ic->journal_section_entries; k++) {
struct journal_entry *je2 = access_journal_entry(ic, i, k);
sector_t sec2, area2, offset2;
+
if (journal_entry_is_unused(je2))
break;
BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
@@ -2533,9 +2563,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
mempool_free(io, &ic->journal_io_mempool);
goto skip_io;
}
- for (l = j; l < k; l++) {
+ for (l = j; l < k; l++)
remove_journal_node(ic, &section_node[l]);
- }
}
spin_unlock_irq(&ic->endio_wait.lock);
@@ -2562,9 +2591,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
journal_entry_set_unused(je2);
r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
ic->tag_size, TAG_WRITE);
- if (unlikely(r)) {
+ if (unlikely(r))
dm_integrity_io_error(ic, "reading tags", r);
- }
}
atomic_inc(&comp.in_flight);
@@ -2590,9 +2618,8 @@ skip_io:
static void integrity_writer(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
- unsigned write_start, write_sections;
-
- unsigned prev_free_sectors;
+ unsigned int write_start, write_sections;
+ unsigned int prev_free_sectors;
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
@@ -2639,12 +2666,12 @@ static void integrity_recalc(struct work_struct *w)
struct dm_io_region io_loc;
sector_t area, offset;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
sector_t logical_sector, n_sectors;
__u8 *t;
- unsigned i;
+ unsigned int i;
int r;
- unsigned super_counter = 0;
+ unsigned int super_counter = 0;
DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
@@ -2668,7 +2695,7 @@ next_chunk:
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
- range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+ range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
@@ -2676,9 +2703,9 @@ next_chunk:
n_sectors = range.n_sectors;
if (ic->mode == 'B') {
- if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
+ if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
goto advance_and_next;
- }
+
while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
logical_sector += ic->sectors_per_block;
@@ -2697,9 +2724,9 @@ next_chunk:
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
- if (ic->mode == 'B') {
+ if (ic->mode == 'B')
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
- }
+
super_counter = 0;
}
@@ -2737,6 +2764,7 @@ next_chunk:
if (ic->mode == 'B') {
sector_t start, end;
+
start = (range.logical_sector >>
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
@@ -2859,10 +2887,10 @@ static void bitmap_flush_work(struct work_struct *work)
}
-static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
- unsigned n_sections, unsigned char commit_seq)
+static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
+ unsigned int n_sections, unsigned char commit_seq)
{
- unsigned i, j, n;
+ unsigned int i, j, n;
if (!n_sections)
return;
@@ -2872,12 +2900,14 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
wraparound_section(ic, &i);
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js = access_journal(ic, i, j);
+
BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
memset(&js->sectors, 0, sizeof(js->sectors));
js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
}
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
+
journal_entry_set_unused(je);
}
}
@@ -2885,9 +2915,10 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
write_journal(ic, start_section, n_sections);
}
-static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
+static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
{
unsigned char k;
+
for (k = 0; k < N_COMMIT_IDS; k++) {
if (dm_integrity_commit_id(ic, i, j, k) == id)
return k;
@@ -2898,11 +2929,11 @@ static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, co
static void replay_journal(struct dm_integrity_c *ic)
{
- unsigned i, j;
+ unsigned int i, j;
bool used_commit_ids[N_COMMIT_IDS];
- unsigned max_commit_id_sections[N_COMMIT_IDS];
- unsigned write_start, write_sections;
- unsigned continue_section;
+ unsigned int max_commit_id_sections[N_COMMIT_IDS];
+ unsigned int write_start, write_sections;
+ unsigned int continue_section;
bool journal_empty;
unsigned char unused, last_used, want_commit_seq;
@@ -2922,6 +2953,7 @@ static void replay_journal(struct dm_integrity_c *ic)
DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
if (ic->journal_io) {
struct journal_completion crypt_comp;
+
crypt_comp.ic = ic;
init_completion(&crypt_comp.comp);
crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
@@ -2935,12 +2967,13 @@ static void replay_journal(struct dm_integrity_c *ic)
goto clear_journal;
journal_empty = true;
- memset(used_commit_ids, 0, sizeof used_commit_ids);
- memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
+ memset(used_commit_ids, 0, sizeof(used_commit_ids));
+ memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
for (i = 0; i < ic->journal_sections; i++) {
for (j = 0; j < ic->journal_section_sectors; j++) {
int k;
struct journal_sector *js = access_journal(ic, i, j);
+
k = find_commit_seq(ic, i, j, js->commit_id);
if (k < 0)
goto clear_journal;
@@ -2950,6 +2983,7 @@ static void replay_journal(struct dm_integrity_c *ic)
if (journal_empty) {
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
+
if (!journal_entry_is_unused(je)) {
journal_empty = false;
break;
@@ -3020,8 +3054,9 @@ brk:
ic->commit_seq = want_commit_seq;
DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
} else {
- unsigned s;
+ unsigned int s;
unsigned char erase_seq;
+
clear_journal:
DEBUG_print("clearing journal\n");
@@ -3058,7 +3093,7 @@ clear_journal:
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
- DEBUG_print("dm_integrity_enter_synchronous_mode\n");
+ DEBUG_print("%s\n", __func__);
if (ic->mode == 'B') {
ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
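
Replacing a hard-coded function name in a debug string with __func__, as above, means the message can never go stale after a rename:

	pr_debug("%s\n", __func__);	/* always prints the enclosing function's name */
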
@@ -3074,7 +3109,7 @@ static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, voi
{
struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
- DEBUG_print("dm_integrity_reboot\n");
+ DEBUG_print("%s\n", __func__);
dm_integrity_enter_synchronous_mode(ic);
@@ -3231,6 +3266,7 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("testing recalc: %x\n", ic->sb->flags);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+
DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
@@ -3252,10 +3288,10 @@ static void dm_integrity_resume(struct dm_target *ti)
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
- unsigned arg_count;
+ unsigned int arg_count;
size_t sz = 0;
switch (type) {
@@ -3271,6 +3307,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE: {
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
arg_count = 3;
@@ -3305,7 +3342,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
- DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
+ DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
if (ic->mode == 'B') {
@@ -3384,7 +3421,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
- unsigned sector_space = JOURNAL_SECTOR_DATA;
+ unsigned int sector_space = JOURNAL_SECTOR_DATA;
ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
@@ -3430,6 +3467,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
return -EINVAL;
} else {
__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
meta_size <<= ic->log2_buffer_sectors;
@@ -3447,6 +3485,7 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
{
if (!ic->meta_dev) {
int test_bit;
+
ic->provided_data_sectors = 0;
for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
__u64 prev_data_sectors = ic->provided_data_sectors;
@@ -3461,9 +3500,10 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
}
}
-static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
+static int initialize_superblock(struct dm_integrity_c *ic,
+ unsigned int journal_sectors, unsigned int interleave_sectors)
{
- unsigned journal_sections;
+ unsigned int journal_sections;
int test_bit;
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
@@ -3490,8 +3530,8 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
if (!interleave_sectors)
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
- ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
- ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+ ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+ ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
get_provided_data_sectors(ic);
if (!ic->provided_data_sectors)
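
The max()/min() pair bounding log2_interleave_sectors becomes max_t()/min_t() because the __u8 field and the int constants differ in type. The same bound could arguably be written in one step with clamp_t() (a sketch, not what the patch does):

	ic->sb->log2_interleave_sectors = clamp_t(__u8, __fls(interleave_sectors),
						  MIN_LOG2_INTERLEAVE_SECTORS,
						  MAX_LOG2_INTERLEAVE_SECTORS);
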
@@ -3508,6 +3548,7 @@ try_smaller_buffer:
for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
+
if (test_journal_sections > journal_sections)
continue;
ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
@@ -3548,7 +3589,7 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
static void dm_integrity_free_page_list(struct page_list *pl)
{
- unsigned i;
+ unsigned int i;
if (!pl)
return;
@@ -3557,10 +3598,10 @@ static void dm_integrity_free_page_list(struct page_list *pl)
kvfree(pl);
}
-static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
+static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
{
struct page_list *pl;
- unsigned i;
+ unsigned int i;
pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
@@ -3583,7 +3624,8 @@ static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < ic->journal_sections; i++)
kvfree(sl[i]);
kvfree(sl);
@@ -3593,7 +3635,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
struct page_list *pl)
{
struct scatterlist **sl;
- unsigned i;
+ unsigned int i;
sl = kvmalloc_array(ic->journal_sections,
sizeof(struct scatterlist *),
@@ -3603,10 +3645,10 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist *s;
- unsigned start_index, start_offset;
- unsigned end_index, end_offset;
- unsigned n_pages;
- unsigned idx;
+ unsigned int start_index, start_offset;
+ unsigned int end_index, end_offset;
+ unsigned int n_pages;
+ unsigned int idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
page_list_location(ic, i, ic->journal_section_sectors - 1,
@@ -3624,7 +3666,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
sg_init_table(s, n_pages);
for (idx = start_index; idx <= end_index; idx++) {
char *va = lowmem_page_address(pl[idx].page);
- unsigned start = 0, end = PAGE_SIZE;
+ unsigned int start = 0, end = PAGE_SIZE;
+
if (idx == start_index)
start = start_offset;
if (idx == end_index)
@@ -3642,7 +3685,7 @@ static void free_alg(struct alg_spec *a)
{
kfree_sensitive(a->alg_string);
kfree_sensitive(a->key);
- memset(a, 0, sizeof *a);
+ memset(a, 0, sizeof(*a));
}
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
@@ -3711,7 +3754,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
static int create_journal(struct dm_integrity_c *ic, char **error)
{
int r = 0;
- unsigned i;
+ unsigned int i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
unsigned char *crypt_data = NULL, *crypt_iv = NULL;
struct skcipher_request *req = NULL;
@@ -3738,7 +3781,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
if (ic->journal_crypt_alg.alg_string) {
- unsigned ivsize, blocksize;
+ unsigned int ivsize, blocksize;
struct journal_completion comp;
comp.ic = ic;
@@ -3805,13 +3848,14 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_init_table(sg, ic->journal_pages + 1);
for (i = 0; i < ic->journal_pages; i++) {
char *va = lowmem_page_address(ic->journal_xor[i].page);
+
clear_page(va);
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
- sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
+ sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
skcipher_request_set_crypt(req, sg, sg,
- PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+ PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -3827,7 +3871,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
- unsigned crypt_len = roundup(ivsize, blocksize);
+ unsigned int crypt_len = roundup(ivsize, blocksize);
req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
if (!req) {
@@ -3877,7 +3921,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
- memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
+ memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
sg_init_one(&sg, crypt_data, crypt_len);
skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
@@ -3915,7 +3959,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
for (i = 0; i < N_COMMIT_IDS; i++) {
- unsigned j;
+ unsigned int j;
+
retest_commit_id:
for (j = 0; j < i; j++) {
if (ic->commit_ids[j] == ic->commit_ids[i]) {
@@ -3969,17 +4014,17 @@ bad:
* journal_mac
* recalculate
*/
-static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_integrity_c *ic;
char dummy;
int r;
- unsigned extra_args;
+ unsigned int extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 18, "Invalid number of feature args"},
};
- unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
@@ -4058,8 +4103,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
- unsigned val;
+ unsigned int val;
unsigned long long llval;
+
opt_string = dm_shift_arg(&as);
if (!opt_string) {
r = -EINVAL;
@@ -4090,7 +4136,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
if (val < 1 << SECTOR_SHIFT ||
val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
- (val & (val -1))) {
+ (val & (val - 1))) {
r = -EINVAL;
ti->error = "Invalid block_size argument";
goto bad;
@@ -4363,9 +4409,9 @@ try_smaller_buffer:
log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
- if (should_write_sb) {
+ if (should_write_sb)
ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
- }
+
n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
@@ -4391,7 +4437,7 @@ try_smaller_buffer:
DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
- DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
+ DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
@@ -4409,8 +4455,9 @@ try_smaller_buffer:
if (ic->internal_hash) {
size_t recalc_tags_size;
+
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
- if (!ic->recalc_wq ) {
+ if (!ic->recalc_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
goto bad;
@@ -4465,8 +4512,8 @@ try_smaller_buffer:
}
if (ic->mode == 'B') {
- unsigned i;
- unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned int i;
+ unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
@@ -4486,7 +4533,7 @@ try_smaller_buffer:
INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
for (i = 0; i < ic->n_bitmap_blocks; i++) {
struct bitmap_block_status *bbs = &ic->bbs[i];
- unsigned sector, pl_index, pl_offset;
+ unsigned int sector, pl_index, pl_offset;
INIT_WORK(&bbs->work, bitmap_block_work);
bbs->ic = ic;
@@ -4523,7 +4570,9 @@ try_smaller_buffer:
goto bad;
}
if (ic->mode == 'B') {
- unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ unsigned int max_io_len;
+
+ max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
if (!max_io_len)
max_io_len = 1U << 31;
DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
@@ -4594,10 +4643,12 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->journal_io_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
if (ic->sk_requests) {
- unsigned i;
+ unsigned int i;
for (i = 0; i < ic->journal_sections; i++) {
- struct skcipher_request *req = ic->sk_requests[i];
+ struct skcipher_request *req;
+
+ req = ic->sk_requests[i];
if (req) {
kfree_sensitive(req->iv);
skcipher_request_free(req);
diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c
index 0db53ccb94ba..6155b0117c9d 100644
--- a/drivers/md/dm-io-rewind.c
+++ b/drivers/md/dm-io-rewind.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Red Hat, Inc.
*/
@@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+ unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
@@ -68,7 +68,6 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
static inline void dm_bio_integrity_rewind(struct bio *bio,
unsigned int bytes_done)
{
- return;
}
#endif
@@ -104,7 +103,6 @@ static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
- return;
}
#endif
@@ -131,7 +129,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
* rewinding from end of bio and restoring its original position.
 * Caller is also responsible for restoring bio's size.
*/
-static void dm_bio_rewind(struct bio *bio, unsigned bytes)
+static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
if (bio_integrity(bio))
dm_bio_integrity_rewind(bio, bytes);
diff --git a/drivers/md/dm-io-tracker.h b/drivers/md/dm-io-tracker.h
index bdcc6273ebf0..bea1ca11855e 100644
--- a/drivers/md/dm-io-tracker.h
+++ b/drivers/md/dm-io-tracker.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Red Hat, Inc. All rights reserved.
*
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 783564533459..dc2df76999b0 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2006 Red Hat GmbH
@@ -38,7 +39,7 @@ struct io {
void *context;
void *vma_invalidate_address;
unsigned long vma_invalidate_size;
-} __attribute__((aligned(DM_IO_MAX_REGIONS)));
+} __aligned(DM_IO_MAX_REGIONS);
static struct kmem_cache *_dm_io_cache;
@@ -48,7 +49,7 @@ static struct kmem_cache *_dm_io_cache;
struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
- unsigned min_ios = dm_get_reserved_bio_based_ios();
+ unsigned int min_ios = dm_get_reserved_bio_based_ios();
int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -65,7 +66,7 @@ struct dm_io_client *dm_io_client_create(void)
return client;
- bad:
+bad:
mempool_exit(&client->pool);
kfree(client);
return ERR_PTR(ret);
@@ -80,15 +81,17 @@ void dm_io_client_destroy(struct dm_io_client *client)
}
EXPORT_SYMBOL(dm_io_client_destroy);
-/*-----------------------------------------------------------------
+/*
+ *-------------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
* To avoid a memory allocation to store just 5 or 6 bits, we
* ensure the 'struct io' pointer is aligned so enough low bits are
* always zero and then combine it with the region number directly in
* bi_private.
- *---------------------------------------------------------------*/
+ *-------------------------------------------------------------------
+ */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
- unsigned region)
+ unsigned int region)
{
if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
DMCRIT("Unaligned struct io pointer %p", io);
@@ -99,7 +102,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
- unsigned *region)
+ unsigned int *region)
{
unsigned long val = (unsigned long)bio->bi_private;
@@ -107,10 +110,12 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
*region = val & (DM_IO_MAX_REGIONS - 1);
}
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* We need an io object to keep track of the number of bios that
* have been dispatched for a particular io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void complete_io(struct io *io)
{
unsigned long error_bits = io->error_bits;
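
Note: store_io_and_region_in_bio()/retrieve_io_and_region_from_bio() above work because struct io is __aligned(DM_IO_MAX_REGIONS), so the pointer's low bits are guaranteed zero and can carry the region number inside bi_private. A self-contained userspace sketch of the same low-bit tagging (toy names and a toy MAX_REGIONS, not the kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_REGIONS 8	/* must be a power of two */

struct io {
	int error;
} __attribute__((aligned(MAX_REGIONS)));

/* An object aligned to N has log2(N) zero low bits; use them to encode
 * a small integer alongside the pointer in one word. */
static void *pack(struct io *io, unsigned int region)
{
	assert(((uintptr_t)io & (MAX_REGIONS - 1)) == 0);
	assert(region < MAX_REGIONS);
	return (void *)((uintptr_t)io | region);
}

static struct io *unpack(void *val, unsigned int *region)
{
	*region = (uintptr_t)val & (MAX_REGIONS - 1);
	return (struct io *)((uintptr_t)val & ~(uintptr_t)(MAX_REGIONS - 1));
}

int main(void)
{
	struct io *io = aligned_alloc(MAX_REGIONS, sizeof(*io));
	unsigned int region;
	void *tagged = pack(io, 5);
	struct io *back = unpack(tagged, &region);

	printf("match=%d region=%u\n", back == io, region);	/* match=1 region=5 */
	free(io);
	return 0;
}
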
@@ -137,7 +142,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
static void endio(struct bio *bio)
{
struct io *io;
- unsigned region;
+ unsigned int region;
blk_status_t error;
if (bio->bi_status && bio_data_dir(bio) == READ)
@@ -154,17 +159,19 @@ static void endio(struct bio *bio)
dec_count(io, region, error);
}
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* These little objects provide an abstraction for getting a new
* destination page for io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dpages {
void (*get_page)(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset);
+ struct page **p, unsigned long *len, unsigned int *offset);
void (*next_page)(struct dpages *dp);
union {
- unsigned context_u;
+ unsigned int context_u;
struct bvec_iter context_bi;
};
void *context_ptr;
@@ -177,9 +184,9 @@ struct dpages {
* Functions for getting the pages from a list.
*/
static void list_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+ struct page **p, unsigned long *len, unsigned int *offset)
{
- unsigned o = dp->context_u;
+ unsigned int o = dp->context_u;
struct page_list *pl = (struct page_list *) dp->context_ptr;
*p = pl->page;
@@ -190,11 +197,12 @@ static void list_get_page(struct dpages *dp,
static void list_next_page(struct dpages *dp)
{
struct page_list *pl = (struct page_list *) dp->context_ptr;
+
dp->context_ptr = pl->next;
dp->context_u = 0;
}
-static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
+static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
dp->get_page = list_get_page;
dp->next_page = list_next_page;
@@ -206,7 +214,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
* Functions for getting the pages from a bvec.
*/
static void bio_get_page(struct dpages *dp, struct page **p,
- unsigned long *len, unsigned *offset)
+ unsigned long *len, unsigned int *offset)
{
struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
dp->context_bi);
@@ -244,7 +252,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
* Functions for getting the pages from a VMA.
*/
static void vm_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+ struct page **p, unsigned long *len, unsigned int *offset)
{
*p = vmalloc_to_page(dp->context_ptr);
*offset = dp->context_u;
@@ -269,7 +277,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
* Functions for getting the pages from kernel memory.
*/
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
- unsigned *offset)
+ unsigned int *offset)
{
*p = virt_to_page(dp->context_ptr);
*offset = dp->context_u;
@@ -290,18 +298,20 @@ static void km_dp_init(struct dpages *dp, void *data)
dp->context_ptr = data;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* IO routines that accept a list of pages.
- *---------------------------------------------------------------*/
-static void do_region(const blk_opf_t opf, unsigned region,
+ *---------------------------------------------------------------
+ */
+static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
struct bio *bio;
struct page *page;
unsigned long len;
- unsigned offset;
- unsigned num_bvecs;
+ unsigned int offset;
+ unsigned int num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
@@ -350,18 +360,20 @@ static void do_region(const blk_opf_t opf, unsigned region,
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
- } else while (remaining) {
- /*
- * Try and add as many pages as possible.
- */
- dp->get_page(dp, &page, &len, &offset);
- len = min(len, to_bytes(remaining));
- if (!bio_add_page(bio, page, len, offset))
- break;
-
- offset = 0;
- remaining -= to_sector(len);
- dp->next_page(dp);
+ } else {
+ while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ len = min(len, to_bytes(remaining));
+ if (!bio_add_page(bio, page, len, offset))
+ break;
+
+ offset = 0;
+ remaining -= to_sector(len);
+ dp->next_page(dp);
+ }
}
atomic_inc(&io->count);
@@ -508,7 +520,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
int r;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 36fc6ae4737a..50a1259294d1 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
@@ -31,13 +32,15 @@ struct dm_file {
* poll will wait until the global event number is greater than
* this value.
*/
- volatile unsigned global_event_nr;
+ volatile unsigned int global_event_nr;
};
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* The ioctl interface needs to be able to look up devices by
* name or uuid.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct hash_cell {
struct rb_node name_node;
struct rb_node uuid_node;
@@ -51,10 +54,10 @@ struct hash_cell {
};
struct vers_iter {
- size_t param_size;
- struct dm_target_versions *vers, *old_vers;
- char *end;
- uint32_t flags;
+ size_t param_size;
+ struct dm_target_versions *vers, *old_vers;
+ char *end;
+ uint32_t flags;
};
@@ -78,16 +81,20 @@ static void dm_hash_exit(void)
dm_hash_remove_all(false, false, false);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Code for looking up a device by name
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *__get_name_cell(const char *str)
{
struct rb_node *n = name_rb_tree.rb_node;
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
- int c = strcmp(hc->name, str);
+ int c;
+
+ c = strcmp(hc->name, str);
if (!c) {
dm_get(hc->md);
return hc;
@@ -104,7 +111,9 @@ static struct hash_cell *__get_uuid_cell(const char *str)
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
- int c = strcmp(hc->uuid, str);
+ int c;
+
+ c = strcmp(hc->uuid, str);
if (!c) {
dm_get(hc->md);
return hc;
@@ -144,7 +153,9 @@ static void __link_name(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
- int c = strcmp(hc->name, new_hc->name);
+ int c;
+
+ c = strcmp(hc->name, new_hc->name);
BUG_ON(!c);
parent = *n;
n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
@@ -167,7 +178,9 @@ static void __link_uuid(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
- int c = strcmp(hc->uuid, new_hc->uuid);
+ int c;
+
+ c = strcmp(hc->uuid, new_hc->uuid);
BUG_ON(!c);
parent = *n;
n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
@@ -195,9 +208,11 @@ static struct hash_cell *__get_dev_cell(uint64_t dev)
return hc;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Inserting, removing and renaming a device.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
struct mapped_device *md)
{
@@ -295,6 +310,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
struct dm_table *table;
int srcu_idx;
+ lockdep_assert_held(&_hash_lock);
+
/* remove from the dev trees */
__unlink_name(hc);
__unlink_uuid(hc);
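
Note: the new lockdep_assert_held(&_hash_lock) turns __hash_remove()'s locking precondition into a checkable one; on CONFIG_PROVE_LOCKING kernels a caller that forgets the lock gets an immediate splat instead of a silent race. A rough userspace analogue with a hand-tracked owner (illustrative only; in-kernel lockdep needs no such bookkeeping field):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct checked_mutex {
	pthread_mutex_t m;
	pthread_t owner;
	int held;
};

static void cm_lock(struct checked_mutex *cm)
{
	pthread_mutex_lock(&cm->m);
	cm->owner = pthread_self();
	cm->held = 1;
}

static void cm_unlock(struct checked_mutex *cm)
{
	cm->held = 0;
	pthread_mutex_unlock(&cm->m);
}

/* the analogue of lockdep_assert_held() */
static void cm_assert_held(struct checked_mutex *cm)
{
	assert(cm->held && pthread_equal(cm->owner, pthread_self()));
}

static struct checked_mutex hash_lock = { .m = PTHREAD_MUTEX_INITIALIZER };

static void hash_remove(void)	/* caller must hold hash_lock */
{
	cm_assert_held(&hash_lock);
	/* ... unlink from the trees ... */
}

int main(void)
{
	cm_lock(&hash_lock);
	hash_remove();		/* ok: lock held */
	cm_unlock(&hash_lock);
	puts("done");
	return 0;
}
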
@@ -413,7 +430,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
struct hash_cell *hc;
struct dm_table *table;
struct mapped_device *md;
- unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
int srcu_idx;
/*
@@ -434,8 +451,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
hc = __get_name_cell(new);
if (hc) {
- DMERR("Unable to change %s on mapped device %s to one that "
- "already exists: %s",
+ DMERR("Unable to change %s on mapped device %s to one that already exists: %s",
change_uuid ? "uuid" : "name",
param->name, new);
dm_put(hc->md);
@@ -482,7 +498,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
dm_table_event(table);
dm_put_live_table(hc->md, srcu_idx);
- if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
md = hc->md;
@@ -500,9 +516,11 @@ void dm_deferred_remove(void)
dm_hash_remove_all(true, false, true);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Implementation of the ioctl commands
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* All the ioctl commands get dispatched to functions with this
* prototype.
@@ -612,6 +630,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
void *uuid_ptr;
+
hc = container_of(n, struct hash_cell, name_node);
if (!filter_device(hc, param->name, param->uuid))
continue;
@@ -652,36 +671,34 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
static void list_version_get_needed(struct target_type *tt, void *needed_param)
{
- size_t *needed = needed_param;
+ size_t *needed = needed_param;
- *needed += sizeof(struct dm_target_versions);
- *needed += strlen(tt->name) + 1;
- *needed += ALIGN_MASK;
+ *needed += sizeof(struct dm_target_versions);
+ *needed += strlen(tt->name) + 1;
+ *needed += ALIGN_MASK;
}
static void list_version_get_info(struct target_type *tt, void *param)
{
- struct vers_iter *info = param;
+ struct vers_iter *info = param;
- /* Check space - it might have changed since the first iteration */
- if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
- info->end) {
+ /* Check space - it might have changed since the first iteration */
+ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) {
+ info->flags = DM_BUFFER_FULL_FLAG;
+ return;
+ }
- info->flags = DM_BUFFER_FULL_FLAG;
- return;
- }
+ if (info->old_vers)
+ info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers);
- if (info->old_vers)
- info->old_vers->next = (uint32_t) ((void *)info->vers -
- (void *)info->old_vers);
- info->vers->version[0] = tt->version[0];
- info->vers->version[1] = tt->version[1];
- info->vers->version[2] = tt->version[2];
- info->vers->next = 0;
- strcpy(info->vers->name, tt->name);
+ info->vers->version[0] = tt->version[0];
+ info->vers->version[1] = tt->version[1];
+ info->vers->version[2] = tt->version[2];
+ info->vers->next = 0;
+ strcpy(info->vers->name, tt->name);
- info->old_vers = info->vers;
- info->vers = align_ptr((void *)(info->vers + 1) + strlen(tt->name) + 1);
+ info->old_vers = info->vers;
+ info->vers = align_ptr((void *)(info->vers + 1) + strlen(tt->name) + 1);
}
static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name)
@@ -772,7 +789,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
down_read(&_hash_lock);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
DMERR("device has been removed from the dev hash table.");
goto out;
}
@@ -841,6 +858,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
int srcu_idx;
+
table = dm_get_inactive_table(md, &srcu_idx);
if (table) {
if (!(dm_table_get_mode(table) & FMODE_WRITE))
@@ -921,9 +939,9 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
* Sneakily write in both the name and the uuid
* while we have the cell.
*/
- strlcpy(param->name, hc->name, sizeof(param->name));
+ strscpy(param->name, hc->name, sizeof(param->name));
if (hc->uuid)
- strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
+ strscpy(param->uuid, hc->uuid, sizeof(param->uuid));
else
param->uuid[0] = '\0';
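
Note: the strlcpy() → strscpy() conversion is more than style. strscpy() returns -E2BIG on truncation and reads at most the destination size from the source, whereas strlcpy() returns strlen(src) and so must always walk the whole (possibly untrusted) source string. A simplified re-implementation of the strscpy() contract, for illustration only:

#include <stdio.h>
#include <string.h>

#define E2BIG 7

/* Copy at most size-1 bytes, always NUL-terminate, return the copied
 * length or -E2BIG on truncation (simplified; the real strscpy() is in
 * lib/string.c and is faster). */
static long strscpy_demo(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -E2BIG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -E2BIG : (long)i;
}

int main(void)
{
	char name[8];	/* hypothetical, smaller than DM_NAME_LEN */

	printf("%ld\n", strscpy_demo(name, "short", sizeof(name)));		/* 5 */
	printf("%ld\n", strscpy_demo(name, "a-very-long-name", sizeof(name)));	/* -7 */
	printf("%s\n", name);	/* "a-very-": truncated but terminated */
	return 0;
}
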
@@ -995,7 +1013,7 @@ static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_si
dm_ima_measure_on_device_remove(md, false);
- if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
@@ -1021,7 +1039,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
int r;
char *new_data = (char *) param + param->data_start;
struct mapped_device *md;
- unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data ||
@@ -1073,8 +1091,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
goto out;
}
- if (indata[0] > 65535 || indata[1] > 255 ||
- indata[2] > 255 || indata[3] > ULONG_MAX) {
+ if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255) {
DMERR("Geometry exceeds range limits.");
goto out;
}
@@ -1096,7 +1113,7 @@ out:
static int do_suspend(struct dm_ioctl *param)
{
int r = 0;
- unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct mapped_device *md;
md = find_device(param);
@@ -1125,10 +1142,11 @@ out:
static int do_resume(struct dm_ioctl *param)
{
int r = 0;
- unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *new_map, *old_map = NULL;
+ bool need_resize_uevent = false;
down_write(&_hash_lock);
@@ -1149,6 +1167,8 @@ static int do_resume(struct dm_ioctl *param)
/* Do we need to load a new map ? */
if (new_map) {
+ sector_t old_size, new_size;
+
/* Suspend if it isn't already suspended */
if (param->flags & DM_SKIP_LOCKFS_FLAG)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
@@ -1157,6 +1177,7 @@ static int do_resume(struct dm_ioctl *param)
if (!dm_suspended_md(md))
dm_suspend(md, suspend_flags);
+ old_size = dm_get_size(md);
old_map = dm_swap_table(md, new_map);
if (IS_ERR(old_map)) {
dm_sync_table(md);
@@ -1164,6 +1185,9 @@ static int do_resume(struct dm_ioctl *param)
dm_put(md);
return PTR_ERR(old_map);
}
+ new_size = dm_get_size(md);
+ if (old_size && new_size && old_size != new_size)
+ need_resize_uevent = true;
if (dm_table_get_mode(new_map) & FMODE_WRITE)
set_disk_ro(dm_disk(md), 0);
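
Note: the old_size/new_size bracketing added around dm_swap_table() is what feeds the new need_resize_uevent flag, so the resize indication rides on the single KOBJ_CHANGE uevent sent at resume rather than a second uevent that udev could observe out of order. Reduced to its control flow (standalone mock with hypothetical sizes, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t dev_size = 1000;	/* hypothetical current size */

static sector_t get_size(void) { return dev_size; }
static void swap_table(sector_t new_table_size) { dev_size = new_table_size; }

int main(void)
{
	bool need_resize_uevent = false;
	sector_t old_size, new_size;

	old_size = get_size();
	swap_table(2000);	/* load a bigger hypothetical table */
	new_size = get_size();

	/* flag a resize only when both sizes are known and differ */
	if (old_size && new_size && old_size != new_size)
		need_resize_uevent = true;

	printf("resize=%d\n", need_resize_uevent);	/* resize=1 */
	return 0;
}
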
@@ -1176,7 +1200,7 @@ static int do_resume(struct dm_ioctl *param)
if (!r) {
dm_ima_measure_on_device_resume(md, new_map ? true : false);
- if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+ if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent))
param->flags |= DM_UEVENT_GENERATED_FLAG;
}
}
@@ -1236,7 +1260,7 @@ static void retrieve_status(struct dm_table *table,
char *outbuf, *outptr;
status_type_t type;
size_t remaining, len, used = 0;
- unsigned status_flags = 0;
+ unsigned int status_flags = 0;
outptr = outbuf = get_result_buffer(param, param_size, &len);
@@ -1387,7 +1411,7 @@ static int populate_table(struct dm_table *table,
char *target_params;
if (!param->target_count) {
- DMERR("populate_table: no targets specified");
+ DMERR("%s: no targets specified", __func__);
return -EINVAL;
}
@@ -1476,7 +1500,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
/* stage inactive table */
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock);
r = -ENXIO;
@@ -1564,7 +1588,7 @@ static void retrieve_deps(struct dm_table *table,
/*
* Count the devices.
*/
- list_for_each (tmp, dm_table_get_devices(table))
+ list_for_each(tmp, dm_table_get_devices(table))
count++;
/*
@@ -1581,7 +1605,7 @@ static void retrieve_deps(struct dm_table *table,
*/
deps->count = count;
count = 0;
- list_for_each_entry (dd, dm_table_get_devices(table), list)
+ list_for_each_entry(dd, dm_table_get_devices(table), list)
deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
param->data_size = param->data_start + needed;
@@ -1641,8 +1665,8 @@ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
-static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
@@ -1757,10 +1781,11 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
#define IOCTL_FLAGS_NO_PARAMS 1
#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
-/*-----------------------------------------------------------------
- * Implementation of open/close/ioctl on the special char
- * device.
- *---------------------------------------------------------------*/
+/*
+ *---------------------------------------------------------------
+ * Implementation of open/close/ioctl on the special char device.
+ *---------------------------------------------------------------
+ */
static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static const struct {
@@ -1812,10 +1837,9 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
if (copy_from_user(version, user->version, sizeof(version)))
return -EFAULT;
- if ((DM_VERSION_MAJOR != version[0]) ||
- (DM_VERSION_MINOR < version[1])) {
- DMERR("ioctl interface mismatch: "
- "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+ if ((version[0] != DM_VERSION_MAJOR) ||
+ (version[1] > DM_VERSION_MINOR)) {
+ DMERR("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
DM_VERSION_MAJOR, DM_VERSION_MINOR,
DM_VERSION_PATCHLEVEL,
version[0], version[1], version[2], cmd);
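
Note: the rewritten comparison reads more naturally but enforces the same ioctl compatibility rule as before: the major version must match exactly, and the user's minor must not exceed the kernel's. As a sketch with hypothetical version numbers:

#include <stdio.h>

#define DM_VERSION_MAJOR 4	/* hypothetical */
#define DM_VERSION_MINOR 47	/* hypothetical */

static int version_ok(unsigned int user_major, unsigned int user_minor)
{
	return user_major == DM_VERSION_MAJOR && user_minor <= DM_VERSION_MINOR;
}

int main(void)
{
	printf("%d %d %d\n",
	       version_ok(4, 40),	/* 1: older user minor is fine */
	       version_ok(4, 48),	/* 0: user newer than kernel */
	       version_ok(3, 10));	/* 0: major mismatch */
	return 0;
}
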
@@ -1852,7 +1876,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
struct dm_ioctl *dmi;
int secure_data;
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
- unsigned noio_flag;
+ unsigned int noio_flag;
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
@@ -2079,9 +2103,9 @@ static const struct file_operations _ctl_fops = {
static struct miscdevice _dm_misc = {
.minor = MAPPER_CTRL_MINOR,
- .name = DM_NAME,
+ .name = DM_NAME,
.nodename = DM_DIR "/" DM_CONTROL_NODE,
- .fops = &_ctl_fops
+ .fops = &_ctl_fops
};
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
@@ -2128,7 +2152,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
mutex_lock(&dm_hash_cells_mutex);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
r = -ENXIO;
goto out;
}
@@ -2241,7 +2265,9 @@ int __init dm_early_create(struct dm_ioctl *dmi,
err_destroy_table:
dm_table_destroy(t);
err_hash_remove:
+ down_write(&_hash_lock);
(void) __hash_remove(__get_name_cell(dmi->name));
+ up_write(&_hash_lock);
/* release reference from __get_name_cell */
dm_put(md);
err_destroy_dm:
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 4d3bbbea2e9a..a158c6e5fbd7 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 Sistina Software (UK) Limited.
* Copyright (C) 2006 Red Hat GmbH
@@ -34,14 +35,14 @@
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024
-static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
-module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
-static unsigned dm_get_kcopyd_subjob_size(void)
+static unsigned int dm_get_kcopyd_subjob_size(void)
{
- unsigned sub_job_size_kb;
+ unsigned int sub_job_size_kb;
sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
DEFAULT_SUB_JOB_SIZE_KB,
@@ -50,15 +51,17 @@ static unsigned dm_get_kcopyd_subjob_size(void)
return sub_job_size_kb << 1;
}
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client {
struct page_list *pages;
- unsigned nr_reserved_pages;
- unsigned nr_free_pages;
- unsigned sub_job_size;
+ unsigned int nr_reserved_pages;
+ unsigned int nr_free_pages;
+ unsigned int sub_job_size;
struct dm_io_client *io_client;
@@ -109,7 +112,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
* The reason for this is unknown but possibly due to jiffies rounding errors
* or read/write cache inside the disk.
*/
-#define SLEEP_MSEC 100
+#define SLEEP_USEC 100000
/*
* Maximum number of sleep events. There is a theoretical livelock if more
@@ -119,7 +122,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
static void io_job_start(struct dm_kcopyd_throttle *t)
{
- unsigned throttle, now, difference;
+ unsigned int throttle, now, difference;
int slept = 0, skew;
if (unlikely(!t))
@@ -148,6 +151,7 @@ try_again:
if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+
t->total_period >>= shift;
t->io_period >>= shift;
}
@@ -157,7 +161,7 @@ try_again:
if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
slept++;
spin_unlock_irq(&throttle_spinlock);
- msleep(SLEEP_MSEC);
+ fsleep(SLEEP_USEC);
goto try_again;
}
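
Note: msleep(SLEEP_MSEC) → fsleep(SLEEP_USEC) keeps the same 100ms throttle interval; fsleep() takes microseconds and picks an appropriate sleep primitive for the magnitude. The only trap in the hunk is the unit change, checkable at a glance:

#include <assert.h>

#define SLEEP_MSEC 100		/* removed constant, milliseconds */
#define SLEEP_USEC 100000	/* new constant, microseconds */

int main(void)
{
	/* same wall-clock duration before and after the conversion */
	assert(SLEEP_USEC == SLEEP_MSEC * 1000);
	return 0;
}
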
@@ -182,7 +186,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
goto skip_limit;
if (!t->num_io_jobs) {
- unsigned now, difference;
+ unsigned int now, difference;
now = jiffies;
difference = now - t->last_jiffies;
@@ -303,9 +307,9 @@ static void drop_pages(struct page_list *pl)
/*
* Allocate and reserve nr_pages for the use of a specific client.
*/
-static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{
- unsigned i;
+ unsigned int i;
struct page_list *pl = NULL, *next;
for (i = 0; i < nr_pages; i++) {
@@ -333,15 +337,17 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* kcopyd_jobs need to be allocated by the *clients* of kcopyd,
* for this reason we use a mempool to prevent the client from
* ever having to do io (which could cause a deadlock).
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
- unsigned flags;
+ unsigned int flags;
/*
* Error state of the job.
@@ -582,7 +588,7 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
- unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
+ unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
@@ -603,7 +609,7 @@ static int run_pages_job(struct kcopyd_job *job)
* of successful jobs.
*/
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
- int (*fn) (struct kcopyd_job *))
+ int (*fn)(struct kcopyd_job *))
{
struct kcopyd_job *job;
int r, count = 0;
@@ -673,6 +679,7 @@ static void do_work(struct work_struct *work)
static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
+
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
@@ -819,7 +826,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->pages = NULL;
job->op = REQ_OP_READ;
} else {
- memset(&job->source, 0, sizeof job->source);
+ memset(&job->source, 0, sizeof(job->source));
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
@@ -849,8 +856,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
EXPORT_SYMBOL(dm_kcopyd_copy);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context)
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
@@ -900,13 +907,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
}
#endif /* 0 */
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Client setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
- unsigned reserve_pages;
+ unsigned int reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3212ef6aa81b..3e622dcc9dbd 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
@@ -64,7 +65,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = lc;
return 0;
- bad:
+bad:
kfree(lc);
return ret;
}
@@ -95,7 +96,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
}
static void linear_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
size_t sz = 0;
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 9ab93ebea889..5aace6ee6d47 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
@@ -123,7 +124,7 @@ retry:
}
static int build_constructor_string(struct dm_target *ti,
- unsigned argc, char **argv,
+ unsigned int argc, char **argv,
char **ctr_str)
{
int i, str_size;
@@ -188,7 +189,7 @@ static void do_flush(struct work_struct *work)
* to the userspace ctr function.
*/
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int r = 0;
int str_size;
@@ -345,8 +346,6 @@ static void userspace_dtr(struct dm_dirty_log *log)
kfree(lc->usr_argv_str);
kfree(lc);
-
- return;
}
static int userspace_presuspend(struct dm_dirty_log *log)
@@ -660,8 +659,6 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->mark_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
-
- return;
}
/*
@@ -697,8 +694,6 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
-
- return;
}
/*
@@ -755,7 +750,6 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
* It would be nice to be able to report failures.
* However, it is easy enough to detect and resolve.
*/
- return;
}
/*
@@ -792,7 +786,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
* Returns: amount of space consumed
*/
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
int r = 0;
char *table_args;
@@ -926,7 +920,6 @@ static void __exit userspace_dirty_log_exit(void)
kmem_cache_destroy(_flush_entry_cache);
DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
- return;
}
module_init(userspace_dirty_log_init);
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index fdf8ec304f8d..f125b0f553fa 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
@@ -108,9 +109,8 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
if (pkg->error != -EAGAIN)
*(pkg->data_size) = 0;
} else if (tfr->data_size > *(pkg->data_size)) {
- DMERR("Insufficient space to receive package [%u] "
- "(%u vs %zu)", tfr->request_type,
- tfr->data_size, *(pkg->data_size));
+ DMERR("Insufficient space to receive package [%u] (%u vs %zu)",
+ tfr->request_type, tfr->data_size, *(pkg->data_size));
*(pkg->data_size) = 0;
pkg->error = -ENOSPC;
@@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
fill_pkg(msg, NULL);
else if (msg->len < sizeof(*tfr))
DMERR("Incomplete message received (expected %u, got %u): [%u]",
- (unsigned)sizeof(*tfr), msg->len, msg->seq);
+ (unsigned int)sizeof(*tfr), msg->len, msg->seq);
else
fill_pkg(NULL, tfr);
spin_unlock(&receiving_list_lock);
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index 04ee874f9153..f2f970af906f 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 178e13a5b059..cbd0f81f4a35 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Facebook. All rights reserved.
*
@@ -231,13 +232,13 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
goto error;
}
- ptr = kmap_atomic(page);
+ ptr = kmap_local_page(page);
memcpy(ptr, entry, entrylen);
if (datalen)
memcpy(ptr + entrylen, data, datalen);
memset(ptr + entrylen + datalen, 0,
lc->sectorsize - entrylen - datalen);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
ret = bio_add_page(bio, page, lc->sectorsize, 0);
if (ret != lc->sectorsize) {
@@ -286,11 +287,11 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
goto error_bio;
}
- ptr = kmap_atomic(page);
+ ptr = kmap_local_page(page);
memcpy(ptr, data, pg_datalen);
if (pg_sectorlen > pg_datalen)
memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
ret = bio_add_page(bio, page, pg_sectorlen, 0);
if (ret != pg_sectorlen) {
@@ -742,9 +743,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL;
}
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
memcpy_from_bvec(dst, &bv);
- kunmap_atomic(dst);
+ kunmap_local(dst);
block->vecs[i].bv_page = page;
block->vecs[i].bv_len = bv.bv_len;
block->vec_cnt++;
@@ -792,10 +793,10 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio,
* INFO format: <logged entries> <highest allocated sector>
*/
static void log_writes_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result,
- unsigned maxlen)
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct log_writes_c *lc = ti->private;
switch (type) {
@@ -844,8 +845,8 @@ static int log_writes_iterate_devices(struct dm_target *ti,
* Messages supported:
* mark <mark data> - specify the marked data.
*/
-static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct log_writes_c *lc = ti->private;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index cf10fa667797..afd94d2e7295 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -182,10 +183,12 @@ void dm_dirty_log_destroy(struct dm_dirty_log *log)
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Persistent and core logs share a lot of their implementation.
* FIXME: need a reload method to be called from a resume
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* Magic for persistent mirrors: "MiRr"
*/
@@ -223,7 +226,7 @@ struct log_c {
unsigned int region_count;
region_t sync_count;
- unsigned bitset_uint32_count;
+ unsigned int bitset_uint32_count;
uint32_t *clean_bits;
uint32_t *sync_bits;
uint32_t *recovering_bits; /* FIXME: this seems excessive */
@@ -255,28 +258,30 @@ struct log_c {
* The touched member needs to be updated every time we access
* one of the bitsets.
*/
-static inline int log_test_bit(uint32_t *bs, unsigned bit)
+static inline int log_test_bit(uint32_t *bs, unsigned int bit)
{
return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
- uint32_t *bs, unsigned bit)
+ uint32_t *bs, unsigned int bit)
{
__set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
- uint32_t *bs, unsigned bit)
+ uint32_t *bs, unsigned int bit)
{
__clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Header IO
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
@@ -352,11 +357,13 @@ static int _check_region_size(struct dm_target *ti, uint32_t region_size)
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* core log constructor/destructor
*
* argv contains region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define BYTE_SHIFT 3
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv,
@@ -382,8 +389,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
else if (!strcmp(argv[1], "nosync"))
sync = NOSYNC;
else {
- DMWARN("unrecognised sync argument to "
- "dirty region log: %s", argv[1]);
+ DMWARN("unrecognised sync argument to dirty region log: %s", argv[1]);
return -EINVAL;
}
}
@@ -441,8 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
*/
buf_size =
dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
- bdev_logical_block_size(lc->header_location.
- bdev));
+ bdev_logical_block_size(lc->header_location.bdev));
if (buf_size > bdev_nr_bytes(dev->bdev)) {
DMWARN("log device %s too small: need %llu bytes",
@@ -531,11 +536,13 @@ static void core_dtr(struct dm_dirty_log *log)
destroy_log_context(lc);
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* disk log constructor/destructor
*
* argv contains log_device region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *---------------------------------------------------------------------
+ */
static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
@@ -582,7 +589,7 @@ static void fail_log_device(struct log_c *lc)
static int disk_resume(struct dm_dirty_log *log)
{
int r;
- unsigned i;
+ unsigned int i;
struct log_c *lc = (struct log_c *) log->context;
size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
@@ -646,12 +653,14 @@ static int disk_resume(struct dm_dirty_log *log)
static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
+
return lc->region_size;
}
static int core_resume(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
+
lc->sync_search = 0;
return 0;
}
@@ -659,12 +668,14 @@ static int core_resume(struct dm_dirty_log *log)
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
return log_test_bit(lc->clean_bits, region);
}
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
struct log_c *lc = (struct log_c *) log->context;
+
return log_test_bit(lc->sync_bits, region);
}
@@ -717,12 +728,14 @@ static int disk_flush(struct dm_dirty_log *log)
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
log_clear_bit(lc, lc->clean_bits, region);
}
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
if (likely(!lc->flush_failed))
log_set_bit(lc, lc->clean_bits, region);
}
@@ -757,8 +770,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
log_clear_bit(lc, lc->recovering_bits, region);
if (in_sync) {
log_set_bit(lc, lc->sync_bits, region);
- lc->sync_count++;
- } else if (log_test_bit(lc->sync_bits, region)) {
+ lc->sync_count++;
+ } else if (log_test_bit(lc->sync_bits, region)) {
lc->sync_count--;
log_clear_bit(lc, lc->sync_bits, region);
}
@@ -766,14 +779,16 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
static region_t core_get_sync_count(struct dm_dirty_log *log)
{
- struct log_c *lc = (struct log_c *) log->context;
+ struct log_c *lc = (struct log_c *) log->context;
- return lc->sync_count;
+ return lc->sync_count;
}
#define DMEMIT_SYNC \
- if (lc->sync != DEFAULTSYNC) \
- DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
+ do { \
+ if (lc->sync != DEFAULTSYNC) \
+ DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : ""); \
+ } while (0)
static int core_status(struct dm_dirty_log *log, status_type_t status,
char *result, unsigned int maxlen)
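
Note: wrapping DMEMIT_SYNC in do { ... } while (0) is the standard guard for macros whose body is a bare `if`: without it, an `else` following the macro at a call site binds to the macro's hidden `if`. (The dm_report_EIO hunk in dm-mpath.c below goes the opposite way, dropping the wrapper because its body reduces to a single expression.) A minimal standalone demonstration with toy macros:

#include <stdio.h>

#define EMIT_BARE(cond)  if (cond) puts("emitted")
#define EMIT_SAFE(cond)  do { if (cond) puts("emitted"); } while (0)

int main(void)
{
	int fail = 1, sync = 0;

	if (fail)
		EMIT_BARE(sync);
	else
		puts("no failure");	/* BUG: prints even though fail is set,
					 * because the else binds to the
					 * macro's inner if */

	if (fail)
		EMIT_SAFE(sync);
	else
		puts("no failure");	/* correctly skipped */

	return 0;
}
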
@@ -781,7 +796,7 @@ static int core_status(struct dm_dirty_log *log, status_type_t status,
int sz = 0;
struct log_c *lc = log->context;
- switch(status) {
+ switch (status) {
case STATUSTYPE_INFO:
DMEMIT("1 %s", log->type->name);
break;
@@ -806,7 +821,7 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status,
int sz = 0;
struct log_c *lc = log->context;
- switch(status) {
+ switch (status) {
case STATUSTYPE_INFO:
DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
lc->log_dev_flush_failed ? 'F' :
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e325469a252..61ab1a8d2c9c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
@@ -27,9 +28,11 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
+static struct workqueue_struct *dm_mpath_wq;
+
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
-#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
@@ -39,7 +42,7 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
- unsigned fail_count; /* Cumulative failure count */
+ unsigned int fail_count; /* Cumulative failure count */
struct dm_path path;
struct delayed_work activate_path;
@@ -59,8 +62,8 @@ struct priority_group {
struct multipath *m; /* Owning multipath instance */
struct path_selector ps;
- unsigned pg_num; /* Reference number */
- unsigned nr_pgpaths; /* Number of paths in PG */
+ unsigned int pg_num; /* Reference number */
+ unsigned int nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
bool bypassed:1; /* Temporarily bypass this PG? */
@@ -78,14 +81,14 @@ struct multipath {
struct priority_group *next_pg; /* Switch to this PG if set */
atomic_t nr_valid_paths; /* Total number of usable paths */
- unsigned nr_priority_groups;
+ unsigned int nr_priority_groups;
struct list_head priority_groups;
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_retries; /* Number of times to retry pg_init */
- unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned int pg_init_retries; /* Number of times to retry pg_init */
+ unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
@@ -117,10 +120,11 @@ static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Multipath state flags.
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
@@ -135,6 +139,7 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
if (r) {
unsigned long flags;
+
spin_lock_irqsave(&m->lock, flags);
r = test_bit(MPATHF_bit, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
@@ -143,10 +148,11 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
return r;
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Allocation routines
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
@@ -302,10 +308,11 @@ static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mp
dm_bio_record(bio_details, bio);
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Path selection
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
static int __pg_init_all_paths(struct multipath *m)
{
struct pgpath *pgpath;
@@ -397,7 +404,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned long flags;
struct priority_group *pg;
struct pgpath *pgpath;
- unsigned bypassed = 1;
+ unsigned int bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
spin_lock_irqsave(&m->lock, flags);
@@ -467,13 +474,11 @@ failed:
* it has been invoked.
*/
#define dm_report_EIO(m) \
-do { \
DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
dm_table_device_name((m)->ti->table), \
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
- dm_noflush_suspending((m)->ti)); \
-} while (0)
+ dm_noflush_suspending((m)->ti))
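dm_report_EIO() goes the opposite way from DMEMIT_SYNC above: once a macro body is a single call expression, the do/while guard is dead weight, because one expression statement is already safe under `if`/`else`. A sketch with hypothetical names:

        #define report_eio(m)   pr_debug("EIO on %p\n", (m))

        if (err)
                report_eio(m);  /* one call, one statement; else-safe */
        else
                requeue(m);     /* hypothetical caller */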
/*
* Check whether bios must be queued in the device-mapper core rather
@@ -707,6 +712,7 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+
dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
r = __multipath_map_bio(m, bio, mpio);
switch (r) {
@@ -733,15 +739,15 @@ static void process_queued_bios(struct work_struct *work)
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
-static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
+static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
bool save_old_value, const char *caller)
{
unsigned long flags;
bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
const char *dm_dev_name = dm_table_device_name(m->ti->table);
- DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
- dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
+ DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
+ dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
@@ -754,11 +760,11 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
dm_dev_name);
} else
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
- } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
+ } else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
/* due to "fail_if_no_path" message, need to honor it. */
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
}
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);
DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
dm_dev_name, __func__,
@@ -768,7 +774,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_unlock_irqrestore(&m->lock, flags);
- if (!queue_if_no_path) {
+ if (!f_queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}
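The f_queue_if_no_path rename is about shadowing: the bool parameter used to carry the same name as the enclosing function, so inside the body `queue_if_no_path` referred to the argument rather than the function, which reads poorly and trips up identifier-based tooling. The `f_` (flag) prefix disambiguates while keeping the DMDEBUG output self-describing.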
@@ -825,7 +831,8 @@ static void trigger_event(struct work_struct *work)
dm_table_event(m->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Constructor/argument parsing:
* <#multipath feature args> [<arg>]*
* <#hw_handler args> [hw_handler [<arg>]*]
@@ -834,13 +841,14 @@ static void trigger_event(struct work_struct *work)
* [<selector> <#selector args> [<arg>]*
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
- unsigned ps_argc;
+ unsigned int ps_argc;
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
@@ -983,7 +991,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
};
int r;
- unsigned i, nr_selector_args, nr_args;
+ unsigned int i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@@ -1049,7 +1057,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
- unsigned hw_argc;
+ unsigned int hw_argc;
int ret;
struct dm_target *ti = m->ti;
@@ -1086,7 +1094,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
goto fail;
}
j = sprintf(p, "%d", hw_argc - 1);
- for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
+ for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
j = sprintf(p, "%s", as->argv[i]);
}
dm_consume_args(as, hw_argc - 1);
@@ -1101,7 +1109,7 @@ fail:
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
- unsigned argc;
+ unsigned int argc;
struct dm_target *ti = m->ti;
const char *arg_name;
@@ -1170,7 +1178,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
return r;
}
-static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
/* target arguments */
static const struct dm_arg _args[] = {
@@ -1181,8 +1189,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
int r;
struct multipath *m;
struct dm_arg_set as;
- unsigned pg_count = 0;
- unsigned next_pg_num;
+ unsigned int pg_count = 0;
+ unsigned int next_pg_num;
unsigned long flags;
as.argc = argc;
@@ -1224,7 +1232,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
- unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
+ unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
if (IS_ERR(pg)) {
@@ -1347,7 +1355,7 @@ static int fail_path(struct pgpath *pgpath)
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
- schedule_work(&m->trigger_event);
+ queue_work(dm_mpath_wq, &m->trigger_event);
enable_nopath_timeout(m);
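Queueing trigger_event on a module-local dm_mpath_wq instead of schedule_work() pairs with this pull's removal of flush_scheduled_work() from DM core: work on the system workqueue can only be drained by flushing everyone's work, while a private queue can be torn down precisely. A sketch of the lifecycle this buys, using the names introduced by the patch:

        static struct workqueue_struct *dm_mpath_wq;

        /* producer */
        queue_work(dm_mpath_wq, &m->trigger_event);

        /* module exit: destroy_workqueue() drains queued work before
         * freeing the queue, so nothing runs after the module is gone */
        destroy_workqueue(dm_mpath_wq);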
@@ -1365,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
- unsigned nr_valid_paths;
+ unsigned int nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
@@ -1454,13 +1462,13 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
- unsigned pgnum;
+ unsigned int pgnum;
unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
!m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
- DMWARN("invalid PG number supplied to switch_pg_num");
+ DMWARN("invalid PG number supplied to %s", __func__);
return -EINVAL;
}
@@ -1487,7 +1495,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
struct priority_group *pg;
- unsigned pgnum;
+ unsigned int pgnum;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1789,14 +1797,14 @@ static void multipath_resume(struct dm_target *ti)
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
static void multipath_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
- unsigned pg_num;
+ unsigned int pg_num;
char state;
spin_lock_irqsave(&m->lock, flags);
@@ -1821,7 +1829,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
- switch(m->queue_mode) {
+ switch (m->queue_mode) {
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
@@ -1948,8 +1956,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
spin_unlock_irqrestore(&m->lock, flags);
}
-static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct dm_dev *dev;
@@ -2116,6 +2124,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths)) {
unsigned long flags;
+
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
spin_unlock_irqrestore(&m->lock, flags);
@@ -2168,9 +2177,11 @@ static int multipath_busy(struct dm_target *ti)
return busy;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Module setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct target_type multipath_target = {
.name = "multipath",
.version = {1, 14, 0},
@@ -2196,12 +2207,11 @@ static struct target_type multipath_target = {
static int __init dm_multipath_init(void)
{
- int r;
+ int r = -ENOMEM;
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
- r = -ENOMEM;
goto bad_alloc_kmultipathd;
}
@@ -2215,10 +2225,15 @@ static int __init dm_multipath_init(void)
WQ_MEM_RECLAIM);
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
- r = -ENOMEM;
goto bad_alloc_kmpath_handlerd;
}
+ dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
+ if (!dm_mpath_wq) {
+ DMERR("failed to create workqueue dm_mpath_wq");
+ goto bad_alloc_dm_mpath_wq;
+ }
+
r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("request-based register failed %d", r);
@@ -2229,6 +2244,8 @@ static int __init dm_multipath_init(void)
return 0;
bad_register_target:
+ destroy_workqueue(dm_mpath_wq);
+bad_alloc_dm_mpath_wq:
destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
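The constructor is the usual goto-unwind ladder, now with `r` preset to -ENOMEM so each allocation failure jumps straight to its label without re-assigning the error code. The invariant to check when reading ladders like this: each label frees exactly what was allocated before its failure point, in reverse order. A minimal shape, with hypothetical resources:

        int r = -ENOMEM;

        a = alloc_workqueue("a", 0, 0);
        if (!a)
                goto bad_a;
        b = alloc_workqueue("b", 0, 0);
        if (!b)
                goto bad_b;
        r = register_something();       /* hypothetical */
        if (r < 0)
                goto bad_register;
        return 0;

bad_register:
        destroy_workqueue(b);
bad_b:
        destroy_workqueue(a);
bad_a:
        return r;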
@@ -2238,6 +2255,7 @@ bad_alloc_kmultipathd:
static void __exit dm_multipath_exit(void)
{
+ destroy_workqueue(dm_mpath_wq);
destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
@@ -2247,8 +2265,7 @@ static void __exit dm_multipath_exit(void)
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
-module_param_named(queue_if_no_path_timeout_secs,
- queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
+module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " multipath target");
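The permissions change here (and the matching module_param edits in dm-rq.c below) is checkpatch's preference for octal literals over symbolic modes; 0644 is bit-for-bit the same mode:

        /* S_IRUGO = S_IRUSR | S_IRGRP | S_IROTH = 0400 | 0040 | 0004 = 0444
         * S_IWUSR                               = 0200
         * S_IRUGO | S_IWUSR                     = 0644  (rw-r--r--)
         */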
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index e230f7196259..0e168e0c82dd 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
@@ -17,6 +18,6 @@ struct dm_path {
};
/* Callback for hwh_pg_init_fn to use when complete */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
+void dm_pg_init_complete(struct dm_path *path, unsigned int err_flags);
#endif
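Note the two SPDX styles in this series: .c files take the `//` form while headers keep a block comment, as prescribed by Documentation/process/license-rules.rst:

        // SPDX-License-Identifier: GPL-2.0-only        <- first line of a .c file
        /* SPDX-License-Identifier: GPL-2.0-only */     <- first line of a .h file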
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index fa0ccc585cb4..3e4cb81ce512 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
@@ -114,6 +115,7 @@ int dm_register_path_selector(struct path_selector_type *pst)
return r;
}
+EXPORT_SYMBOL_GPL(dm_register_path_selector);
int dm_unregister_path_selector(struct path_selector_type *pst)
{
@@ -135,6 +137,4 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
return 0;
}
-
-EXPORT_SYMBOL_GPL(dm_register_path_selector);
EXPORT_SYMBOL_GPL(dm_unregister_path_selector);
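This hunk is purely a placement fix: kernel convention (enforced by checkpatch) puts EXPORT_SYMBOL*() immediately after the closing brace of the function it exports, so the stray export of dm_register_path_selector moves up next to its definition:

        int dm_register_path_selector(struct path_selector_type *pst)
        {
                /* body elided */
        }
        EXPORT_SYMBOL_GPL(dm_register_path_selector);   /* directly after the brace */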
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 83cac2b04b66..3861b2d8b963 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
@@ -52,44 +53,43 @@ struct path_selector_type {
/*
* Constructs a path selector object, takes custom arguments
*/
- int (*create) (struct path_selector *ps, unsigned argc, char **argv);
- void (*destroy) (struct path_selector *ps);
+ int (*create)(struct path_selector *ps, unsigned int argc, char **argv);
+ void (*destroy)(struct path_selector *ps);
/*
* Add an opaque path object, along with some selector specific
* path args (eg, path priority).
*/
- int (*add_path) (struct path_selector *ps, struct dm_path *path,
- int argc, char **argv, char **error);
+ int (*add_path)(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error);
/*
* Chooses a path for this io, if no paths are available then
* NULL will be returned.
*/
- struct dm_path *(*select_path) (struct path_selector *ps,
- size_t nr_bytes);
+ struct dm_path *(*select_path)(struct path_selector *ps, size_t nr_bytes);
/*
* Notify the selector that a path has failed.
*/
- void (*fail_path) (struct path_selector *ps, struct dm_path *p);
+ void (*fail_path)(struct path_selector *ps, struct dm_path *p);
/*
* Ask selector to reinstate a path.
*/
- int (*reinstate_path) (struct path_selector *ps, struct dm_path *p);
+ int (*reinstate_path)(struct path_selector *ps, struct dm_path *p);
/*
* Table content based on parameters added in ps_add_path_fn
* or path selector status
*/
- int (*status) (struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned int maxlen);
+ int (*status)(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen);
- int (*start_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes);
- int (*end_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes, u64 start_time);
+ int (*start_io)(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
+ int (*end_io)(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes, u64 start_time);
};
/* Register a path selector */
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 1d82c95d323d..b49e10d76d03 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Historical Service Time
*
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index f74501e65a8e..461ee6b2044d 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -108,7 +108,7 @@ free_pi:
return ret;
}
-static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
+static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@@ -138,7 +138,7 @@ free_selector:
static void ioa_destroy(struct path_selector *ps)
{
struct selector *s = ps->context;
- unsigned cpu;
+ unsigned int cpu;
for_each_cpu(cpu, s->path_mask)
ioa_free_path(s, cpu);
@@ -162,7 +162,7 @@ static int ioa_status(struct path_selector *ps, struct dm_path *path,
return sz;
}
- switch(type) {
+ switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", atomic_read(&s->map_misses));
break;
diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
index cef70657bbbc..e305f05ad1e5 100644
--- a/drivers/md/dm-ps-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
* Copyright (C) 2006-2009 NEC Corporation.
@@ -35,7 +36,7 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
+ unsigned int repeat_count;
atomic_t qlen; /* the number of in-flight I/Os */
};
@@ -52,7 +53,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
+static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@@ -84,9 +85,9 @@ static void ql_destroy(struct path_selector *ps)
}
static int ql_status(struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned maxlen)
+ status_type_t type, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct path_info *pi;
/* When called with NULL path, return selector status/args. */
@@ -116,14 +117,14 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = QL_MIN_IO;
+ unsigned int repeat_count = QL_MIN_IO;
char dummy;
unsigned long flags;
/*
* Arguments: [<repeat_count>]
- * <repeat_count>: The number of I/Os before switching path.
- * If not given, default (QL_MIN_IO) is used.
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (QL_MIN_IO) is used.
*/
if (argc > 1) {
*error = "queue-length ps: incorrect number of arguments";
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index 27f44c5fa04e..0f04b673597a 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
@@ -20,13 +21,15 @@
#define RR_MIN_IO 1
#define RR_VERSION "1.2.0"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Path-handling code, paths are held in lists
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
+ unsigned int repeat_count;
};
static void free_paths(struct list_head *paths)
@@ -39,10 +42,11 @@ static void free_paths(struct list_head *paths)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Round-robin selector
- *---------------------------------------------------------------*/
-
+ *---------------------------------------------------------------
+ */
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
@@ -62,7 +66,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
+static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@@ -93,7 +97,7 @@ static int rr_status(struct path_selector *ps, struct dm_path *path,
if (!path)
DMEMIT("0 ");
else {
- switch(type) {
+ switch (type) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
@@ -119,7 +123,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = RR_MIN_IO;
+ unsigned int repeat_count = RR_MIN_IO;
char dummy;
unsigned long flags;
diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
index 3ec9c33265c5..969d31c40272 100644
--- a/drivers/md/dm-ps-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved.
*
@@ -30,8 +31,8 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
- unsigned relative_throughput;
+ unsigned int repeat_count;
+ unsigned int relative_throughput;
atomic_t in_flight_size; /* Total size of in-flight I/Os */
};
@@ -48,7 +49,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int st_create(struct path_selector *ps, unsigned argc, char **argv)
+static int st_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@@ -80,9 +81,9 @@ static void st_destroy(struct path_selector *ps)
}
static int st_status(struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned maxlen)
+ status_type_t type, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct path_info *pi;
if (!path)
@@ -113,22 +114,21 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = ST_MIN_IO;
- unsigned relative_throughput = 1;
+ unsigned int repeat_count = ST_MIN_IO;
+ unsigned int relative_throughput = 1;
char dummy;
unsigned long flags;
/*
* Arguments: [<repeat_count> [<relative_throughput>]]
- * <repeat_count>: The number of I/Os before switching path.
- * If not given, default (ST_MIN_IO) is used.
- * <relative_throughput>: The relative throughput value of
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (ST_MIN_IO) is used.
+ * <relative_throughput>: The relative throughput value of
* the path among all paths in the path-group.
- * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
* If not given, minimum value '1' is used.
* If '0' is given, the path isn't selected while
- * other paths having a positive value are
- * available.
+ * other paths having a positive value are available.
*/
if (argc > 2) {
*error = "service-time ps: incorrect number of arguments";
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 54263679a7b1..60632b409b80 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2011 Neil Brown
* Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
@@ -29,10 +30,10 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
-static bool devices_handle_discard_safely = false;
+static bool devices_handle_discard_safely;
/*
- * The following flags are used by dm-raid.c to set up the array state.
+ * The following flags are used by dm-raid to set up the array state.
* They must be cleared before md_run is called.
*/
#define FirstUse 10 /* rdev flag */
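Dropping `= false` follows checkpatch's "do not initialise statics to false" rule: file-scope objects are zero-initialized by the C standard regardless, and modern toolchains place them in .bss either way, so the explicit initializer is pure noise:

        static bool devices_handle_discard_safely;      /* implicitly false */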
@@ -362,8 +363,8 @@ static struct {
const int mode;
const char *param;
} _raid456_journal_mode[] = {
- { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
- { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" }
+ { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
+ { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};
/* Return MD raid4/5/6 journal mode for dm @journal_mode one */
@@ -1081,7 +1082,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
if ((!rs->dev[i].rdev.sb_page ||
!test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
- goto too_many;
+ goto too_many;
}
break;
default:
@@ -1114,7 +1115,7 @@ too_many:
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
* [region_size <sectors>] Defines granularity of bitmap
* [journal_dev <dev>]		raid4/5/6 journaling device
- * (i.e. write hole closing log)
+ * (i.e. write hole closing log)
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
@@ -1988,7 +1989,7 @@ struct dm_raid_superblock {
__le64 sectors; /* Used device size in sectors */
/*
- * Additonal Bit field of devices indicating failures to support
+ * Additional Bit field of devices indicating failures to support
* up to 256 devices with the 1.9.0 on-disk metadata format
*/
__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
@@ -2855,7 +2856,7 @@ static int rs_setup_reshape(struct raid_set *rs)
*
* - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
- * which'll hapen in the event handler;
+ * which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
@@ -3148,7 +3149,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* If a takeover is needed, userspace sets any additional
* devices to rebuild and we can check for a valid request here.
*
- * If acceptible, set the level to the new requested
+ * If acceptable, set the level to the new requested
* one, prohibit requesting recovery, allow the raid
* set to run and store superblocks during resume.
*/
@@ -3183,12 +3184,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Out-of-place space has to be available to allow for a reshape unless raid1! */
if (reshape_sectors || rs_is_raid1(rs)) {
/*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
r = rs_prepare_reshape(rs);
if (r)
goto bad;
@@ -3712,7 +3713,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
}
static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
@@ -4001,7 +4002,7 @@ static int raid_preresume(struct dm_target *ti)
}
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
(test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
(rs->requested_bitmap_chunk_sectors &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 06a38dc32025..bc417a5e5b89 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
@@ -19,6 +20,8 @@
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
+static struct workqueue_struct *dm_raid1_wq;
+
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
@@ -32,9 +35,11 @@
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Mirror set structures.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
DM_RAID1_FLUSH_ERROR,
@@ -82,7 +87,7 @@ struct mirror_set {
struct work_struct trigger_event;
- unsigned nr_mirrors;
+ unsigned int nr_mirrors;
struct mirror mirror[];
};
@@ -236,8 +241,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
* Better to issue requests to same failing device
* than to risk returning corrupt data.
*/
- DMERR("Primary mirror (%s) failed while out-of-sync: "
- "Reads may fail.", m->dev->name);
+ DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
+ m->dev->name);
goto out;
}
@@ -248,7 +253,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
DMWARN("All sides of mirror have failed.");
out:
- schedule_work(&ms->trigger_event);
+ queue_work(dm_raid1_wq, &ms->trigger_event);
}
static int mirror_flush(struct dm_target *ti)
@@ -285,13 +290,15 @@ static int mirror_flush(struct dm_target *ti)
return 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Recovery.
*
* When a mirror is first activated we may find that some regions
* are in the no-sync state. We have to recover these by
* recopying from the default mirror to all the others.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
@@ -327,7 +334,7 @@ static void recovery_complete(int read_err, unsigned long write_err,
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
- unsigned i;
+ unsigned int i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
@@ -408,9 +415,11 @@ static void do_recovery(struct mirror_set *ms)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
struct mirror *m = get_default_mirror(ms);
@@ -498,9 +507,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
spin_unlock_irq(&ms->lock);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void read_callback(unsigned long error, void *context)
{
struct bio *bio = context;
@@ -517,8 +528,7 @@ static void read_callback(unsigned long error, void *context)
fail_mirror(m, DM_RAID1_READ_ERROR);
if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
- DMWARN_LIMIT("Read failure on mirror device %s. "
- "Trying alternative device.",
+ DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
m->dev->name);
queue_bio(m->ms, bio, bio_data_dir(bio));
return;
@@ -579,21 +589,21 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* Writes.
*
* We do different things with the write io depending on the
* state of the region that it's in:
*
- * SYNC: increment pending, use kcopyd to write to *all* mirrors
+ * SYNC: increment pending, use kcopyd to write to *all* mirrors
* RECOVERING: delay the io until recovery completes
* NOSYNC: increment pending, just write to the default mirror
- *---------------------------------------------------------------*/
-
-
+ *---------------------------------------------------------------------
+ */
static void write_callback(unsigned long error, void *context)
{
- unsigned i;
+ unsigned int i;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
@@ -842,9 +852,11 @@ static void trigger_event(struct work_struct *work)
dm_table_event(ms->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* kmirrord
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void do_mirror(struct work_struct *work)
{
struct mirror_set *ms = container_of(work, struct mirror_set,
@@ -868,9 +880,11 @@ static void do_mirror(struct work_struct *work)
do_failures(ms, &failures);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Target functions
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
uint32_t region_size,
struct dm_target *ti,
@@ -903,7 +917,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
kfree(ms);
- return NULL;
+ return NULL;
}
ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
@@ -963,10 +977,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
- unsigned argc, char **argv,
- unsigned *args_used)
+ unsigned int argc, char **argv,
+ unsigned int *args_used)
{
- unsigned param_count;
+ unsigned int param_count;
struct dm_dirty_log *dl;
char dummy;
@@ -997,10 +1011,10 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return dl;
}
-static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
- unsigned *args_used)
+static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
+ unsigned int *args_used)
{
- unsigned num_features;
+ unsigned int num_features;
struct dm_target *ti = ms->ti;
char dummy;
int i;
@@ -1389,7 +1403,7 @@ static char device_status_char(struct mirror *m)
static void mirror_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned int m, sz = 0;
int num_feature_args = 0;
@@ -1458,7 +1472,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
{
struct mirror_set *ms = ti->private;
int ret = 0;
- unsigned i;
+ unsigned int i;
for (i = 0; !ret && i < ms->nr_mirrors; i++)
ret = fn(ti, ms->mirror[i].dev,
@@ -1484,22 +1498,28 @@ static struct target_type mirror_target = {
static int __init dm_mirror_init(void)
{
- int r;
+ int r = -ENOMEM;
+
+ dm_raid1_wq = alloc_workqueue("dm_raid1_wq", 0, 0);
+ if (!dm_raid1_wq)
+ goto bad_target;
r = dm_register_target(&mirror_target);
if (r < 0) {
- DMERR("Failed to register mirror target");
+ destroy_workqueue(dm_raid1_wq);
goto bad_target;
}
return 0;
bad_target:
+ DMERR("Failed to register mirror target");
return r;
}
static void __exit dm_mirror_exit(void)
{
+ destroy_workqueue(dm_raid1_wq);
dm_unregister_target(&mirror_target);
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 1f760451e6f4..852cfa37d48a 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -18,7 +19,8 @@
#define DM_MSG_PREFIX "region hash"
-/*-----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------
* Region hash
*
* The mirror splits itself up into discrete regions. Each
@@ -53,20 +55,21 @@
* lists in the region_hash, with the 'state', 'list' and
* 'delayed_bios' fields of the regions. This is used from irq
* context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
+ *------------------------------------------------------------------
+ */
struct dm_region_hash {
uint32_t region_size;
- unsigned region_shift;
+ unsigned int region_shift;
/* holds persistent region state */
struct dm_dirty_log *log;
/* hash table */
rwlock_t hash_lock;
- unsigned mask;
- unsigned nr_buckets;
- unsigned prime;
- unsigned shift;
+ unsigned int mask;
+ unsigned int nr_buckets;
+ unsigned int prime;
+ unsigned int shift;
struct list_head *buckets;
/*
@@ -74,7 +77,7 @@ struct dm_region_hash {
*/
int flush_failure;
- unsigned max_recovery; /* Max # of regions to recover in parallel */
+ unsigned int max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
@@ -163,12 +166,12 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions)
{
struct dm_region_hash *rh;
- unsigned nr_buckets, max_buckets;
+ unsigned int nr_buckets, max_buckets;
size_t i;
int ret;
@@ -236,7 +239,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
- unsigned h;
+ unsigned int h;
struct dm_region *reg, *nreg;
BUG_ON(!list_empty(&rh->quiesced_regions));
@@ -263,9 +266,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
-static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
{
- return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+ return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
}
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a41209a43506..f7e9a3632eb3 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
@@ -23,33 +24,33 @@ struct dm_rq_target_io {
union map_info info;
struct dm_stats_aux stats_aux;
unsigned long duration_jiffies;
- unsigned n_sectors;
- unsigned completed;
+ unsigned int n_sectors;
+ unsigned int completed;
};
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
/*
* Request-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_REQUEST_BASED_IOS 256
-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-unsigned dm_get_reserved_rq_based_ios(void)
+unsigned int dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
-static unsigned dm_get_blk_mq_nr_hw_queues(void)
+static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
-static unsigned dm_get_blk_mq_queue_depth(void)
+static unsigned int dm_get_blk_mq_queue_depth(void)
{
return __dm_get_module_param(&dm_mq_queue_depth,
DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
@@ -127,6 +128,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
+
tio->duration_jiffies = jiffies - tio->duration_jiffies;
dm_stats_account_io(&md->stats, rq_data_dir(orig),
blk_rq_pos(orig), tio->n_sectors, true,
@@ -434,6 +436,7 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
+
tio->duration_jiffies = jiffies;
tio->n_sectors = blk_rq_sectors(orig);
dm_stats_account_io(&md->stats, rq_data_dir(orig),
@@ -581,16 +584,16 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
}
}
-module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+module_param(reserved_rq_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
-module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+module_param(use_blk_mq, bool, 0644);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
-module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+module_param(dm_mq_nr_hw_queues, uint, 0644);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
-module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+module_param(dm_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 1eea0da641db..1f73e194e792 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Internal header file for device mapper
*
@@ -38,7 +39,7 @@ void dm_stop_queue(struct request_queue *q);
void dm_mq_kick_requeue_list(struct mapped_device *md);
-unsigned dm_get_reserved_rq_based_ios(void);
+unsigned int dm_get_reserved_rq_based_ios(void);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 680cc05ec654..f14e5df27874 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -21,10 +22,12 @@
#define DM_PREFETCH_CHUNKS 12
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Persistent snapshots, by persistent we mean that the snapshot
* will survive a reboot.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* We need to store a record of which parts of the origin have
@@ -92,7 +95,7 @@ struct core_exception {
};
struct commit_callback {
- void (*callback)(void *, int success);
+ void (*callback)(void *ref, int success);
void *context;
};
@@ -273,6 +276,7 @@ static void skip_metadata(struct pstore *ps)
{
uint32_t stride = ps->exceptions_per_area + 1;
chunk_t next_free = ps->next_free;
+
if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
ps->next_free++;
}
@@ -303,7 +307,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
{
int r;
struct disk_header *dh;
- unsigned chunk_size;
+ unsigned int chunk_size;
int chunk_size_supplied = 1;
char *chunk_err;
@@ -354,8 +358,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return 0;
if (chunk_size_supplied)
- DMWARN("chunk size %u in device metadata overrides "
- "table chunk size of %u.",
+ DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
chunk_size, ps->store->chunk_size);
/* We had a bogus chunk_size. Fix stuff up. */
@@ -515,15 +518,18 @@ static int read_exceptions(struct pstore *ps,
if (unlikely(prefetch_area < ps->current_area))
prefetch_area = ps->current_area;
- if (DM_PREFETCH_CHUNKS) do {
- chunk_t pf_chunk = area_location(ps, prefetch_area);
- if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
- break;
- dm_bufio_prefetch(client, pf_chunk, 1);
- prefetch_area++;
- if (unlikely(!prefetch_area))
- break;
- } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+ if (DM_PREFETCH_CHUNKS) {
+ do {
+ chunk_t pf_chunk = area_location(ps, prefetch_area);
+
+ if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+ break;
+ dm_bufio_prefetch(client, pf_chunk, 1);
+ prefetch_area++;
+ if (unlikely(!prefetch_area))
+ break;
+ } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+ }
chunk = area_location(ps, ps->current_area);
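The prefetch hunk untangles `if (X) do { ... } while (cond);` (legal C, but easy to misread as a loop guarded per iteration) into an explicit nested block. Because DM_PREFETCH_CHUNKS is a compile-time constant, the outer `if` still lets the whole loop be compiled out by setting it to 0; only the shape changed:

        if (DM_PREFETCH_CHUNKS) {       /* compile-time gate */
                do {
                        /* prefetch one metadata area */
                } while (more_areas_to_prefetch);
        }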
@@ -690,7 +696,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
static void persistent_commit_exception(struct dm_exception_store *store,
struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
+ void (*callback)(void *, int success),
void *callback_context)
{
unsigned int i;
@@ -874,6 +880,7 @@ static int persistent_ctr(struct dm_exception_store *store, char *options)
if (options) {
char overflow = toupper(options[0]);
+
if (overflow == 'O')
store->userspace_supports_overflow = true;
else {
@@ -895,11 +902,11 @@ err_workqueue:
return r;
}
-static unsigned persistent_status(struct dm_exception_store *store,
+static unsigned int persistent_status(struct dm_exception_store *store,
status_type_t status, char *result,
- unsigned maxlen)
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:
@@ -958,8 +965,7 @@ int dm_persistent_snapshot_init(void)
r = dm_exception_store_type_register(&_persistent_compat_type);
if (r) {
- DMERR("Unable to register old-style persistent exception "
- "store type");
+ DMERR("Unable to register old-style persistent exception store type");
dm_exception_store_type_unregister(&_persistent_type);
return r;
}
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 0e0ae4c36b37..1e07a745bedd 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -16,9 +17,11 @@
#define DM_MSG_PREFIX "transient snapshot"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Implementation of the store for non-persistent snapshots.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct transient_c {
sector_t next_free;
};
@@ -53,7 +56,7 @@ static int transient_prepare_exception(struct dm_exception_store *store,
static void transient_commit_exception(struct dm_exception_store *store,
struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
+ void (*callback)(void *, int success),
void *callback_context)
{
/* Just succeed */
@@ -84,11 +87,11 @@ static int transient_ctr(struct dm_exception_store *store, char *options)
return 0;
}
-static unsigned transient_status(struct dm_exception_store *store,
+static unsigned int transient_status(struct dm_exception_store *store,
status_type_t status, char *result,
- unsigned maxlen)
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:
@@ -140,8 +143,7 @@ int dm_transient_snapshot_init(void)
r = dm_exception_store_type_register(&_transient_compat_type);
if (r) {
- DMWARN("Unable to register old-style transient "
- "exception store type");
+ DMWARN("Unable to register old-style transient exception store type");
dm_exception_store_type_unregister(&_transient_type);
return r;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d1c2f84d27e3..f766c21408f1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
*
@@ -41,7 +42,7 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
- unsigned hash_shift;
+ unsigned int hash_shift;
struct hlist_bl_head *table;
};
@@ -106,7 +107,7 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
- unsigned in_progress;
+ unsigned int in_progress;
struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@@ -122,11 +123,11 @@ struct dm_snapshot {
* The merge operation failed if this flag is set.
* Failure modes are handled as follows:
* - I/O error reading the header
- * => don't load the target; abort.
+ * => don't load the target; abort.
* - Header does not have "valid" flag set
- * => use the origin; forget about the snapshot.
+ * => use the origin; forget about the snapshot.
* - I/O error when reading exceptions
- * => don't load the target; abort.
+ * => don't load the target; abort.
* (We can't use the intermediate origin state.)
* - I/O error while merging
* => stop merging; set merge_failed; process I/O normally.
@@ -161,7 +162,7 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
-static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
@@ -244,12 +245,14 @@ struct dm_snap_tracked_chunk {
static void init_tracked_chunk(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+
INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+
return !hlist_unhashed(&c->node);
}
@@ -297,12 +300,12 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
/*
* This conflicting I/O is extremely improbable in the caller,
- * so msleep(1) is sufficient and there is no need for a wait queue.
+ * so fsleep(1000) is sufficient and there is no need for a wait queue.
*/
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
while (__chunk_is_tracked(s, chunk))
- msleep(1);
+ fsleep(1000);
}
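The msleep(1) -> fsleep(1000) conversions in this file matter more than they look: msleep() works in jiffies, so msleep(1) rounds up and can sleep up to ~20 ms at HZ=100. fsleep() (include/linux/delay.h) picks the sleeping mechanism from the requested duration, roughly:

        /* fsleep(us), approximately as implemented:
         *   us <= 10     -> udelay(us)               busy-wait
         *   us <= 20000  -> usleep_range(us, 2*us)   hrtimer sleep
         *   otherwise    -> msleep(us / 1000)        jiffy sleep
         */
        fsleep(1000);   /* ~1 ms via usleep_range(), not an up-to-a-jiffy nap */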
/*
@@ -324,7 +327,7 @@ struct origin {
struct dm_origin {
struct dm_dev *dev;
struct dm_target *ti;
- unsigned split_boundary;
+ unsigned int split_boundary;
struct list_head hash_list;
};
@@ -377,7 +380,7 @@ static void exit_origin_hash(void)
kfree(_dm_origins);
}
-static unsigned origin_hash(struct block_device *bdev)
+static unsigned int origin_hash(struct block_device *bdev)
{
return bdev->bd_dev & ORIGIN_MASK;
}
@@ -388,7 +391,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
struct origin *o;
ol = &_origins[origin_hash(origin)];
- list_for_each_entry (o, ol, hash_list)
+ list_for_each_entry(o, ol, hash_list)
if (bdev_equal(o->bdev, origin))
return o;
@@ -398,6 +401,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
static void __insert_origin(struct origin *o)
{
struct list_head *sl = &_origins[origin_hash(o->bdev)];
+
list_add_tail(&o->hash_list, sl);
}
@@ -407,7 +411,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
struct dm_origin *o;
ol = &_dm_origins[origin_hash(origin)];
- list_for_each_entry (o, ol, hash_list)
+ list_for_each_entry(o, ol, hash_list)
if (bdev_equal(o->dev->bdev, origin))
return o;
@@ -417,6 +421,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
static void __insert_dm_origin(struct dm_origin *o)
{
struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
+
list_add_tail(&o->hash_list, sl);
}
@@ -490,8 +495,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
&snap_merge) == 2) ||
snap_dest) {
- snap->ti->error = "Snapshot cow pairing for exception "
- "table handover failed";
+ snap->ti->error = "Snapshot cow pairing for exception table handover failed";
return -EINVAL;
}
@@ -518,8 +522,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
if (!snap_src->store->type->prepare_merge ||
!snap_src->store->type->commit_merge) {
- snap->ti->error = "Snapshot exception store does not "
- "support snapshot-merge.";
+ snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
return -EINVAL;
}
@@ -652,7 +655,7 @@ static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
}
static int dm_exception_table_init(struct dm_exception_table *et,
- uint32_t size, unsigned hash_shift)
+ uint32_t size, unsigned int hash_shift)
{
unsigned int i;
@@ -850,7 +853,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
+ unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@@ -867,6 +870,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
+
mem /= sizeof(struct hlist_bl_head);
return mem;
@@ -937,8 +941,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
e = dm_lookup_exception(&s->complete, old_chunk);
if (!e) {
- DMERR("Corruption detected: exception for block %llu is "
- "on disk but not in memory",
+ DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
(unsigned long long)old_chunk);
return -EINVAL;
}
@@ -965,8 +968,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
e->new_chunk++;
} else if (old_chunk != e->old_chunk +
dm_consecutive_chunk_count(e)) {
- DMERR("Attempt to merge block %llu from the "
- "middle of a chunk range [%llu - %llu]",
+ DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
(unsigned long long)old_chunk,
(unsigned long long)e->old_chunk,
(unsigned long long)
@@ -1010,7 +1012,7 @@ out:
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
- sector_t sector, unsigned chunk_size);
+ sector_t sector, unsigned int chunk_size);
static void merge_callback(int read_err, unsigned long write_err,
void *context);
@@ -1059,8 +1061,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
&new_chunk);
if (linear_chunks <= 0) {
if (linear_chunks < 0) {
- DMERR("Read error in exception store: "
- "shutting down merge");
+ DMERR("Read error in exception store: shutting down merge");
down_write(&s->lock);
s->merge_failed = true;
up_write(&s->lock);
@@ -1183,7 +1184,7 @@ static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -1241,7 +1242,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int r = -EINVAL;
char *origin_path, *cow_path;
dev_t origin_dev, cow_dev;
- unsigned args_used, num_flush_bios = 1;
+ unsigned int args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc < 4) {
@@ -1493,7 +1494,7 @@ static void snapshot_dtr(struct dm_target *ti)
unregister_snapshot(s);
while (atomic_read(&s->pending_exceptions_count))
- msleep(1);
+ fsleep(1000);
/*
* Ensure instructions in mempool_exit aren't reordered
* before atomic_read.
@@ -1551,6 +1552,7 @@ static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
+
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
@@ -2208,12 +2210,10 @@ static int snapshot_preresume(struct dm_target *ti)
if (snap_src && snap_dest) {
down_read(&snap_src->lock);
if (s == snap_src) {
- DMERR("Unable to resume snapshot source until "
- "handover completes.");
+ DMERR("Unable to resume snapshot source until handover completes.");
r = -EINVAL;
} else if (!dm_suspended(snap_src->ti)) {
- DMERR("Unable to perform snapshot handover until "
- "source is suspended.");
+ DMERR("Unable to perform snapshot handover until source is suspended.");
r = -EINVAL;
}
up_read(&snap_src->lock);
@@ -2315,11 +2315,11 @@ static void snapshot_merge_resume(struct dm_target *ti)
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct dm_snapshot *snap = ti->private;
- unsigned num_features;
+ unsigned int num_features;
switch (type) {
case STATUSTYPE_INFO:
@@ -2344,8 +2344,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
(unsigned long long)sectors_allocated,
(unsigned long long)total_sectors,
(unsigned long long)metadata_sectors);
- }
- else
+ } else
DMEMIT("Unknown");
}
@@ -2419,10 +2418,11 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Origin methods
- *---------------------------------------------------------------*/
-
+ *---------------------------------------------------------------
+ */
/*
* If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
* supplied bio was ignored. The caller may submit it immediately.
@@ -2446,7 +2446,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
chunk_t chunk;
/* Do all the snapshots on this origin */
- list_for_each_entry (snap, snapshots, list) {
+ list_for_each_entry(snap, snapshots, list) {
/*
* Don't make new exceptions in a merging snapshot
* because it has effectively been deleted
@@ -2566,6 +2566,7 @@ again:
if (o) {
if (limit) {
struct dm_snapshot *s;
+
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
@@ -2592,7 +2593,7 @@ again:
* size must be a multiple of merging_snap's chunk_size.
*/
static int origin_write_extent(struct dm_snapshot *merging_snap,
- sector_t sector, unsigned size)
+ sector_t sector, unsigned int size)
{
int must_wait = 0;
sector_t n;
@@ -2668,7 +2669,7 @@ static void origin_dtr(struct dm_target *ti)
static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_origin *o = ti->private;
- unsigned available_sectors;
+ unsigned int available_sectors;
bio_set_dev(bio, o->dev->bdev);
@@ -2679,7 +2680,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
available_sectors = o->split_boundary -
- ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+ ((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
if (bio_sectors(bio) > available_sectors)
dm_accept_partial_bio(bio, available_sectors);
@@ -2713,7 +2714,7 @@ static void origin_postsuspend(struct dm_target *ti)
}
static void origin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_origin *o = ti->private;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index f105a71915ab..c21a19ab73f7 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
@@ -42,12 +42,12 @@ struct dm_stat_shared {
struct dm_stat {
struct list_head list_entry;
int id;
- unsigned stat_flags;
+ unsigned int stat_flags;
size_t n_entries;
sector_t start;
sector_t end;
sector_t step;
- unsigned n_histogram_entries;
+ unsigned int n_histogram_entries;
unsigned long long *histogram_boundaries;
const char *program_id;
const char *aux_data;
@@ -63,7 +63,7 @@ struct dm_stat {
struct dm_stats_last_position {
sector_t last_sector;
- unsigned last_rw;
+ unsigned int last_rw;
};
/*
@@ -250,8 +250,8 @@ static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
- sector_t step, unsigned stat_flags,
- unsigned n_histogram_entries,
+ sector_t step, unsigned int stat_flags,
+ unsigned int n_histogram_entries,
unsigned long long *histogram_boundaries,
const char *program_id, const char *aux_data,
void (*suspend_callback)(struct mapped_device *),
@@ -336,6 +336,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (s->n_histogram_entries) {
unsigned long long *hi;
+
hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
if (!hi) {
r = -ENOMEM;
@@ -357,6 +358,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
s->stat_percpu[cpu] = p;
if (s->n_histogram_entries) {
unsigned long long *hi;
+
hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
if (!hi) {
r = -ENOMEM;
@@ -470,11 +472,11 @@ do_sync_free:
}
static int dm_stats_list(struct dm_stats *stats, const char *program,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
struct dm_stat *s;
sector_t len;
- unsigned sz = 0;
+ unsigned int sz = 0;
/*
* Output format:
@@ -494,7 +496,8 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
DMEMIT(" precise_timestamps");
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
DMEMIT(" histogram:");
for (i = 0; i < s->n_histogram_entries; i++) {
if (i)
@@ -518,7 +521,7 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
* This is racy, but so is part_round_stats_single.
*/
unsigned long long now, difference;
- unsigned in_flight_read, in_flight_write;
+ unsigned int in_flight_read, in_flight_write;
if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
now = jiffies;
@@ -529,8 +532,8 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
if (!difference)
return;
- in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
- in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+ in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
+ in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
if (in_flight_read)
p->io_ticks[READ] += difference;
if (in_flight_write)
@@ -567,6 +570,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
*/
#if BITS_PER_LONG == 32
unsigned long flags;
+
local_irq_save(flags);
#else
preempt_disable();
@@ -578,6 +582,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
atomic_inc(&shared->in_flight[idx]);
} else {
unsigned long long duration;
+
dm_stat_round(s, shared, p);
atomic_dec(&shared->in_flight[idx]);
p->sectors[idx] += len;
@@ -591,15 +596,15 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
duration = stats_aux->duration_ns;
}
if (s->n_histogram_entries) {
- unsigned lo = 0, hi = s->n_histogram_entries + 1;
+ unsigned int lo = 0, hi = s->n_histogram_entries + 1;
+
while (lo + 1 < hi) {
- unsigned mid = (lo + hi) / 2;
- if (s->histogram_boundaries[mid - 1] > duration) {
+ unsigned int mid = (lo + hi) / 2;
+
+ if (s->histogram_boundaries[mid - 1] > duration)
hi = mid;
- } else {
+ else
lo = mid;
- }
-
}
p->histogram[lo]++;
}
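
The loop above is a binary search for the bucket a measured duration lands in: n boundaries split the time axis into n + 1 buckets, and lo ends up as the index of the bucket whose range contains the value. The same search in a standalone sketch with illustrative boundaries:

#include <stdio.h>

int main(void)
{
    unsigned long long boundaries[] = { 100, 1000, 10000 }; /* example data */
    unsigned int n = 3;                     /* n_histogram_entries */
    unsigned long long duration = 2500;
    unsigned int lo = 0, hi = n + 1;

    while (lo + 1 < hi) {
        unsigned int mid = (lo + hi) / 2;

        if (boundaries[mid - 1] > duration)
            hi = mid;
        else
            lo = mid;
    }
    printf("bucket %u\n", lo);  /* 2500 falls in [1000, 10000): bucket 2 */
    return 0;
}
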
@@ -651,7 +656,7 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
}
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
- sector_t bi_sector, unsigned bi_sectors, bool end,
+ sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *stats_aux)
{
@@ -740,7 +745,8 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < s->n_histogram_entries + 1; i++)
shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
@@ -774,7 +780,8 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
p->time_in_queue -= shared->tmp.time_in_queue;
local_irq_enable();
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < s->n_histogram_entries + 1; i++) {
local_irq_disable();
p = &s->stat_percpu[smp_processor_id()][x];
@@ -811,7 +818,7 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
unsigned long long result;
- unsigned mult;
+ unsigned int mult;
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
return j;
@@ -831,9 +838,9 @@ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long
static int dm_stats_print(struct dm_stats *stats, int id,
size_t idx_start, size_t idx_len,
- bool clear, char *result, unsigned maxlen)
+ bool clear, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct dm_stat *s;
size_t x;
sector_t start, end, step;
@@ -889,10 +896,10 @@ static int dm_stats_print(struct dm_stats *stats, int id,
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
if (s->n_histogram_entries) {
- unsigned i;
- for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ unsigned int i;
+
+ for (i = 0; i < s->n_histogram_entries + 1; i++)
DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
- }
}
DMEMIT("\n");
@@ -938,11 +945,11 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
return 0;
}
-static int parse_histogram(const char *h, unsigned *n_histogram_entries,
+static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
unsigned long long **histogram_boundaries)
{
const char *q;
- unsigned n;
+ unsigned int n;
unsigned long long last;
*n_histogram_entries = 1;
@@ -962,6 +969,7 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
unsigned long long hi;
int s;
char ch;
+
s = sscanf(h, "%llu%c", &hi, &ch);
if (!s || (s == 2 && ch != ','))
return -EINVAL;
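
That sscanf pattern is the core of the histogram argument parser: each entry must be a number, optionally followed by a comma, and the boundaries are expected to increase. A userspace sketch of the validation loop (simplified; the strict-increase check is an assumption about the surrounding code):

#include <stdio.h>

int main(void)
{
    const char *h = "100,1000,10000";       /* example histogram argument */
    unsigned long long hi, last = 0;
    char ch;
    int s;

    while (*h) {
        s = sscanf(h, "%llu%c", &hi, &ch);
        if (!s || (s == 2 && ch != ','))
            return 22;                      /* EINVAL: malformed entry */
        if (hi <= last)
            return 22;                      /* boundaries must increase */
        last = hi;
        while (*h && *h != ',')             /* advance to the next entry */
            h++;
        if (*h == ',')
            h++;
    }
    printf("highest boundary: %llu\n", last);   /* 10000 */
    return 0;
}
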
@@ -977,23 +985,21 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
}
static int message_stats_create(struct mapped_device *md,
- unsigned argc, char **argv,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
int id;
char dummy;
unsigned long long start, end, len, step;
- unsigned divisor;
+ unsigned int divisor;
const char *program_id, *aux_data;
- unsigned stat_flags = 0;
-
- unsigned n_histogram_entries = 0;
+ unsigned int stat_flags = 0;
+ unsigned int n_histogram_entries = 0;
unsigned long long *histogram_boundaries = NULL;
-
struct dm_arg_set as, as_backup;
const char *a;
- unsigned feature_args;
+ unsigned int feature_args;
/*
* Input format:
@@ -1046,7 +1052,8 @@ static int message_stats_create(struct mapped_device *md,
else if (!strncasecmp(a, "histogram:", 10)) {
if (n_histogram_entries)
goto ret_einval;
- if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
+ r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
+ if (r)
goto ret;
} else
goto ret_einval;
@@ -1102,7 +1109,7 @@ ret:
}
static int message_stats_delete(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1117,7 +1124,7 @@ static int message_stats_delete(struct mapped_device *md,
}
static int message_stats_clear(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1132,8 +1139,8 @@ static int message_stats_clear(struct mapped_device *md,
}
static int message_stats_list(struct mapped_device *md,
- unsigned argc, char **argv,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
const char *program = NULL;
@@ -1155,8 +1162,8 @@ static int message_stats_list(struct mapped_device *md,
}
static int message_stats_print(struct mapped_device *md,
- unsigned argc, char **argv, bool clear,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv, bool clear,
+ char *result, unsigned int maxlen)
{
int id;
char dummy;
@@ -1182,7 +1189,7 @@ static int message_stats_print(struct mapped_device *md,
}
static int message_stats_set_aux(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1196,8 +1203,8 @@ static int message_stats_set_aux(struct mapped_device *md,
return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
@@ -1240,5 +1247,5 @@ void dm_statistics_exit(void)
DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}
-module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, 0444);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
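
The final hunk swaps S_IRUGO for the octal literal 0444, matching the checkpatch preference applied throughout this series; the two spellings are bit-for-bit identical, as a quick check shows:

#include <stdio.h>
#include <sys/stat.h>

/* the kernel's definition, reproduced for userspace */
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

int main(void)
{
    printf("%04o\n", S_IRUGO);              /* 0444 */
    printf("%04o\n", S_IRUGO | S_IWUSR);    /* 0644 */
    return 0;
}
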
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 09c81a1ec057..0bc152c8e4f3 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -26,11 +26,11 @@ void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen);
+int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
- sector_t bi_sector, unsigned bi_sectors, bool end,
+ sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *aux);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index baa085cc67bd..8d6951157106 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
@@ -15,6 +16,8 @@
#include <linux/slab.h>
#include <linux/log2.h>
+static struct workqueue_struct *dm_stripe_wq;
+
#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15
@@ -108,15 +111,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
width = ti->len;
if (sector_div(width, stripes)) {
- ti->error = "Target length not divisible by "
- "number of stripes";
+ ti->error = "Target length not divisible by number of stripes";
return -EINVAL;
}
tmp_len = width;
if (sector_div(tmp_len, chunk_size)) {
- ti->error = "Target length not divisible by "
- "chunk size";
+ ti->error = "Target length not divisible by chunk size";
return -EINVAL;
}
@@ -124,15 +125,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* Do we have enough arguments for that many stripes ?
*/
if (argc != (2 + 2 * stripes)) {
- ti->error = "Not enough destinations "
- "specified";
+ ti->error = "Not enough destinations specified";
return -EINVAL;
}
sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
- ti->error = "Memory allocation for striped context "
- "failed";
+ ti->error = "Memory allocation for striped context failed";
return -ENOMEM;
}
@@ -262,18 +261,18 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
- } else {
- /* The range doesn't map to the target stripe */
- bio_endio(bio);
- return DM_MAPIO_SUBMITTED;
}
+
+ /* The range doesn't map to the target stripe */
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
}
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
- unsigned target_bio_nr;
+ unsigned int target_bio_nr;
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
@@ -359,7 +358,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
*/
static void stripe_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
unsigned int sz = 0;
@@ -368,14 +367,12 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", sc->stripes);
- for (i = 0; i < sc->stripes; i++) {
+ for (i = 0; i < sc->stripes; i++)
DMEMIT("%s ", sc->stripe[i].dev->name);
- }
+
DMEMIT("1 ");
- for (i = 0; i < sc->stripes; i++) {
- DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ?
- 'D' : 'A');
- }
+ for (i = 0; i < sc->stripes; i++)
+ DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ? 'D' : 'A');
break;
case STATUSTYPE_TABLE:
@@ -406,7 +403,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
- unsigned i;
+ unsigned int i;
char major_minor[16];
struct stripe_c *sc = ti->private;
@@ -433,7 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
atomic_inc(&(sc->stripe[i].error_count));
if (atomic_read(&(sc->stripe[i].error_count)) <
DM_IO_ERROR_THRESHOLD)
- schedule_work(&sc->trigger_event);
+ queue_work(dm_stripe_wq, &sc->trigger_event);
}
return DM_ENDIO_DONE;
@@ -444,7 +441,7 @@ static int stripe_iterate_devices(struct dm_target *ti,
{
struct stripe_c *sc = ti->private;
int ret = 0;
- unsigned i = 0;
+ unsigned int i = 0;
do {
ret = fn(ti, sc->stripe[i].dev,
@@ -459,7 +456,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * sc->stripes);
@@ -486,9 +483,14 @@ int __init dm_stripe_init(void)
{
int r;
+ dm_stripe_wq = alloc_workqueue("dm_stripe_wq", 0, 0);
+ if (!dm_stripe_wq)
+ return -ENOMEM;
r = dm_register_target(&stripe_target);
- if (r < 0)
+ if (r < 0) {
+ destroy_workqueue(dm_stripe_wq);
DMWARN("target registration failed");
+ }
return r;
}
@@ -496,4 +498,5 @@ int __init dm_stripe_init(void)
void dm_stripe_exit(void)
{
dm_unregister_target(&stripe_target);
+ destroy_workqueue(dm_stripe_wq);
}
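
The init/exit changes give dm-stripe its own workqueue for trigger_event instead of schedule_work()'s shared system queue, and with that comes an unwind obligation: the queue allocated first must be destroyed if target registration fails, and again at module exit. A minimal userspace sketch of the acquire/undo pairing (stand-in stubs, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct workqueue { int dummy; };

static struct workqueue *alloc_wq(void)     /* stand-in for alloc_workqueue() */
{
    return calloc(1, sizeof(struct workqueue));
}

static void destroy_wq(struct workqueue *wq)
{
    free(wq);
}

static int register_target(void)
{
    return -1;      /* simulate failure to exercise the unwind path */
}

int main(void)
{
    struct workqueue *wq = alloc_wq();
    int r;

    if (!wq)
        return 12;  /* ENOMEM */
    r = register_target();
    if (r < 0) {
        destroy_wq(wq);     /* undo in reverse order of setup */
        fprintf(stderr, "target registration failed\n");
        return 1;
    }
    destroy_wq(wq);         /* normal teardown path */
    return 0;
}
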
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 534dc2ca8bb0..ee2432927e90 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2012 by Dell Inc. All rights reserved.
* Copyright (C) 2011-2013 Red Hat, Inc.
@@ -38,9 +39,9 @@ struct switch_path {
struct switch_ctx {
struct dm_target *ti;
- unsigned nr_paths; /* Number of paths in path_list. */
+ unsigned int nr_paths; /* Number of paths in path_list. */
- unsigned region_size; /* Region size in 512-byte sectors */
+ unsigned int region_size; /* Region size in 512-byte sectors */
unsigned long nr_regions; /* Number of regions making up the device */
signed char region_size_bits; /* log2 of region_size or -1 */
@@ -56,8 +57,8 @@ struct switch_ctx {
struct switch_path path_list[];
};
-static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
- unsigned region_size)
+static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned int nr_paths,
+ unsigned int region_size)
{
struct switch_ctx *sctx;
@@ -73,7 +74,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
return sctx;
}
-static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
+static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
{
struct switch_ctx *sctx = ti->private;
sector_t nr_regions = ti->len;
@@ -124,7 +125,7 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
}
static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
- unsigned long *region_index, unsigned *bit)
+ unsigned long *region_index, unsigned int *bit)
{
if (sctx->region_entries_per_slot_bits >= 0) {
*region_index = region_nr >> sctx->region_entries_per_slot_bits;
@@ -137,10 +138,10 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr
*bit *= sctx->region_table_entry_bits;
}
-static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
+static unsigned int switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
{
unsigned long region_index;
- unsigned bit;
+ unsigned int bit;
switch_get_position(sctx, region_nr, &region_index, &bit);
@@ -151,9 +152,9 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
/*
* Find which path to use at given offset.
*/
-static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+static unsigned int switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
{
- unsigned path_nr;
+ unsigned int path_nr;
sector_t p;
p = offset;
@@ -172,10 +173,10 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
}
static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
- unsigned value)
+ unsigned int value)
{
unsigned long region_index;
- unsigned bit;
+ unsigned int bit;
region_table_slot_t pte;
switch_get_position(sctx, region_nr, &region_index, &bit);
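
switch_get_position() and the two table helpers treat the region table as fixed-width entries packed into machine words: a region number is split into a slot index plus a bit offset, and the path number is masked out on read or ORed in on write. A userspace sketch with an assumed 4-bit entry width:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t slot = 0;                      /* one region_table_slot_t */
    unsigned int entry_bits = 4;            /* assumed entry width */
    unsigned int entries_per_slot = 64 / entry_bits;
    unsigned long region_nr = 5;
    unsigned int bit = (region_nr % entries_per_slot) * entry_bits;
    uint64_t mask = ((uint64_t)1 << entry_bits) - 1;

    slot = (slot & ~(mask << bit)) | ((uint64_t)9 << bit);  /* write path 9 */
    printf("region %lu -> path %u\n", region_nr,
           (unsigned int)((slot >> bit) & mask));           /* reads back 9 */
    return 0;
}
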
@@ -191,7 +192,7 @@ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long reg
*/
static void initialise_region_table(struct switch_ctx *sctx)
{
- unsigned path_nr = 0;
+ unsigned int path_nr = 0;
unsigned long region_nr;
for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
@@ -249,7 +250,7 @@ static void switch_dtr(struct dm_target *ti)
* Optional args are to allow for future extension: currently this
* parameter must be 0.
*/
-static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int switch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
static const struct dm_arg _args[] = {
{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
@@ -259,7 +260,7 @@ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct switch_ctx *sctx;
struct dm_arg_set as;
- unsigned nr_paths, region_size, nr_optional_args;
+ unsigned int nr_paths, region_size, nr_optional_args;
int r;
as.argc = argc;
@@ -320,7 +321,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
- unsigned path_nr = switch_get_path_nr(sctx, offset);
+ unsigned int path_nr = switch_get_path_nr(sctx, offset);
bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
@@ -371,9 +372,9 @@ static __always_inline unsigned long parse_hex(const char **string)
}
static int process_set_region_mappings(struct switch_ctx *sctx,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
- unsigned i;
+ unsigned int i;
unsigned long region_index = 0;
for (i = 1; i < argc; i++) {
@@ -466,8 +467,8 @@ static int process_set_region_mappings(struct switch_ctx *sctx,
*
* Only set_region_mappings is supported.
*/
-static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int switch_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
static DEFINE_MUTEX(message_mutex);
@@ -487,10 +488,10 @@ static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
}
static void switch_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct switch_ctx *sctx = ti->private;
- unsigned sz = 0;
+ unsigned int sz = 0;
int path_nr;
switch (type) {
@@ -519,7 +520,7 @@ static void switch_status(struct dm_target *ti, status_type_t type,
static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct switch_ctx *sctx = ti->private;
- unsigned path_nr;
+ unsigned int path_nr;
path_nr = switch_get_path_nr(sctx, 0);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index e28c92478536..bfaef27ca79f 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
*
@@ -11,13 +12,13 @@
struct dm_sysfs_attr {
struct attribute attr;
- ssize_t (*show)(struct mapped_device *, char *);
- ssize_t (*store)(struct mapped_device *, const char *, size_t count);
+ ssize_t (*show)(struct mapped_device *md, char *p);
+ ssize_t (*store)(struct mapped_device *md, const char *p, size_t count);
};
#define DM_ATTR_RO(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
- __ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL)
+ __ATTR(_name, 0444, dm_attr_##_name##_show, NULL)
static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
char *page)
@@ -42,7 +43,7 @@ static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
#define DM_ATTR_RW(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
- __ATTR(_name, S_IRUGO | S_IWUSR, dm_attr_##_name##_show, dm_attr_##_name##_store)
+ __ATTR(_name, 0644, dm_attr_##_name##_show, dm_attr_##_name##_store)
static ssize_t dm_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t count)
@@ -119,7 +120,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
.store = dm_attr_store,
};
-static struct kobj_type dm_ktype = {
+static const struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_groups = dm_groups,
.release = dm_kobject_release,
@@ -142,6 +143,7 @@ int dm_sysfs_init(struct mapped_device *md)
void dm_sysfs_exit(struct mapped_device *md)
{
struct kobject *kobj = dm_kobject(md);
+
kobject_put(kobj);
wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8541d5688f3a..2055a758541d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -72,7 +73,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
n = get_child(n, CHILDREN_PER_NODE - 1);
if (n >= t->counts[l])
- return (sector_t) - 1;
+ return (sector_t) -1;
return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
@@ -126,7 +127,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
}
int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md)
+ unsigned int num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -211,7 +212,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
struct dm_dev_internal *dd;
- list_for_each_entry (dd, l, list)
+ list_for_each_entry(dd, l, list)
if (dd->dm_dev->bdev->bd_dev == dev)
return dd;
@@ -234,8 +235,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMERR("%s: %pg too small for target: "
- "start=%llu, len=%llu, dev_size=%llu",
+ DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdev,
(unsigned long long)start,
(unsigned long long)len,
@@ -280,8 +280,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if (start & (logical_block_size_sectors - 1)) {
- DMERR("%s: start=%llu not aligned to h/w "
- "logical block size %u of %pg",
+ DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
limits->logical_block_size, bdev);
@@ -289,8 +288,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
}
if (len & (logical_block_size_sectors - 1)) {
- DMERR("%s: len=%llu not aligned to h/w "
- "logical block size %u of %pg",
+ DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
limits->logical_block_size, bdev);
@@ -364,6 +362,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
if (!dev)
return -ENODEV;
}
+ if (dev == disk_devt(t->md->disk))
+ return -EINVAL;
dd = find_device(&t->devices, dev);
if (!dd) {
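
The added dev == disk_devt(t->md->disk) test closes the simplest self-reference loophole: a table that lists its own mapped device as a data device now fails with -EINVAL before any device lookup. The comparison itself is plain dev_t equality, sketched here with illustrative device numbers:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>

int main(void)
{
    dev_t table_disk = makedev(253, 0);     /* hypothetical dm-0 */
    dev_t candidate = makedev(253, 0);      /* table names itself */

    if (candidate == table_disk) {
        fprintf(stderr, "device cannot reference itself\n");
        return 22;                          /* EINVAL */
    }
    return 0;
}
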
@@ -371,7 +371,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
if (!dd)
return -ENOMEM;
- if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
+ r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
+ if (r) {
kfree(dd);
return r;
}
@@ -470,10 +471,10 @@ static int adjoin(struct dm_table *t, struct dm_target *ti)
* On the other hand, dm-switch needs to process bulk data using messages and
* excessive use of GFP_NOIO could cause trouble.
*/
-static char **realloc_argv(unsigned *size, char **old_argv)
+static char **realloc_argv(unsigned int *size, char **old_argv)
{
char **argv;
- unsigned new_size;
+ unsigned int new_size;
gfp_t gfp;
if (*size) {
@@ -499,7 +500,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
int dm_split_args(int *argc, char ***argvp, char *input)
{
char *start, *end = input, *out, **argv = NULL;
- unsigned array_size = 0;
+ unsigned int array_size = 0;
*argc = 0;
@@ -732,9 +733,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
/*
* Target argument parsing helpers.
*/
-static int validate_next_arg(const struct dm_arg *arg,
- struct dm_arg_set *arg_set,
- unsigned *value, char **error, unsigned grouped)
+static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *value, char **error, unsigned int grouped)
{
const char *arg_str = dm_shift_arg(arg_set);
char dummy;
@@ -752,14 +752,14 @@ static int validate_next_arg(const struct dm_arg *arg,
}
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error)
+ unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error)
+ unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 1);
}
@@ -780,7 +780,7 @@ const char *dm_shift_arg(struct dm_arg_set *as)
}
EXPORT_SYMBOL(dm_shift_arg);
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
BUG_ON(as->argc < num_args);
as->argc -= num_args;
@@ -856,7 +856,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
static int dm_table_determine_type(struct dm_table *t)
{
- unsigned bio_based = 0, request_based = 0, hybrid = 0;
+ unsigned int bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *ti;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -881,8 +881,7 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMERR("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types can't be mixed up");
return -EINVAL;
}
}
@@ -1185,8 +1184,7 @@ static int dm_table_register_integrity(struct dm_table *t)
* profile the new profile should not conflict.
*/
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMERR("%s: conflict with existing integrity profile: "
- "%s profile mismatch",
+ DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
dm_device_name(t->md),
template_disk->disk_name);
return 1;
@@ -1527,7 +1525,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, func, data))
return true;
- }
+ }
return false;
}
@@ -1535,7 +1533,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- unsigned *num_devices = data;
+ unsigned int *num_devices = data;
(*num_devices)++;
@@ -1565,7 +1563,7 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- unsigned num_devices = 0;
+ unsigned int num_devices = 0;
if (!ti->type->iterate_devices)
return false;
@@ -1708,8 +1706,7 @@ combine_limits:
* for the table.
*/
if (blk_stack_limits(limits, &ti_limits, 0) < 0)
- DMWARN("%s: adding target device "
- "(start sect %llu len %llu) "
+ DMWARN("%s: adding target device (start sect %llu len %llu) "
"caused an alignment inconsistency",
dm_device_name(t->md),
(unsigned long long) ti->begin,
@@ -1971,8 +1968,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
- }
- else
+ } else
blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 8cd5184e62f0..26ea22b1a0d7 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 Sistina Software (UK) Limited
*
@@ -92,6 +93,7 @@ int dm_register_target(struct target_type *tt)
up_write(&_lock);
return rv;
}
+EXPORT_SYMBOL(dm_register_target);
void dm_unregister_target(struct target_type *tt)
{
@@ -105,6 +107,7 @@ void dm_unregister_target(struct target_type *tt)
up_write(&_lock);
}
+EXPORT_SYMBOL(dm_unregister_target);
/*
* io-err: always fails an io, useful for bringing
@@ -170,6 +173,3 @@ void dm_target_exit(void)
{
dm_unregister_target(&error_target);
}
-
-EXPORT_SYMBOL(dm_register_target);
-EXPORT_SYMBOL(dm_unregister_target);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6bcc4c4786d8..fd464fb024c3 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Red Hat, Inc.
*
@@ -14,7 +15,8 @@
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
-/*--------------------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
* As far as the metadata goes, there is:
*
* - A superblock in block zero, taking up fewer than 512 bytes for
@@ -70,7 +72,8 @@
*
* All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
* from the block manager.
- *--------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
#define DM_MSG_PREFIX "thin metadata"
@@ -239,10 +242,11 @@ struct dm_thin_device {
uint32_t snapshotted_time;
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* superblock validator
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
@@ -265,15 +269,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: "
- "wanted %llu", le64_to_cpu(disk_super->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: "
- "wanted %llu", le64_to_cpu(disk_super->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->magic),
(unsigned long long)THIN_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -282,8 +286,8 @@ static int sb_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
@@ -296,10 +300,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Methods for the btree value types
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
return (b << 24) | t;
@@ -318,12 +323,12 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*/
typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
-static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
+static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
{
uint64_t b, begin, end;
uint32_t t;
bool in_run = false;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, value_le++) {
/* We know value_le is 8 byte aligned */
@@ -348,13 +353,13 @@ static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned
fn(sm, begin, end);
}
-static void data_block_inc(void *context, const void *value_le, unsigned count)
+static void data_block_inc(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_inc_blocks);
}
-static void data_block_dec(void *context, const void *value_le, unsigned count)
+static void data_block_dec(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_dec_blocks);
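
Values in these btrees pack a block number and timestamp as (b << 24) | t, and with_runs() coalesces consecutive block numbers into [begin, end) runs so the space-map callback fires once per run instead of once per block. The coalescing logic in a standalone sketch (values shown already unpacked):

#include <stdio.h>
#include <stdint.h>

static void run(uint64_t begin, uint64_t end)
{
    printf("run [%llu, %llu)\n",
           (unsigned long long)begin, (unsigned long long)end);
}

int main(void)
{
    uint64_t blocks[] = { 10, 11, 12, 40 };     /* example block numbers */
    unsigned int i, count = 4;
    uint64_t begin = 0;
    int in_run = 0;

    for (i = 0; i < count; i++) {
        if (in_run && blocks[i] == blocks[i - 1] + 1)
            continue;                           /* extend the current run */
        if (in_run)
            run(begin, blocks[i - 1] + 1);      /* flush the finished run */
        begin = blocks[i];
        in_run = 1;
    }
    if (in_run)
        run(begin, blocks[count - 1] + 1);      /* prints [10,13) and [40,41) */
    return 0;
}
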
@@ -374,21 +379,21 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
return b1 == b2;
}
-static void subtree_inc(void *context, const void *value, unsigned count)
+static void subtree_inc(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, root_le++)
dm_tm_inc(info->tm, le64_to_cpu(*root_le));
}
-static void subtree_dec(void *context, const void *value, unsigned count)
+static void subtree_dec(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, root_le++)
if (dm_btree_del(info, le64_to_cpu(*root_le)))
@@ -398,6 +403,7 @@ static void subtree_dec(void *context, const void *value, unsigned count)
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
+
memcpy(&v1_le, value1_le, sizeof(v1_le));
memcpy(&v2_le, value2_le, sizeof(v2_le));
@@ -448,10 +454,10 @@ static int superblock_lock(struct dm_pool_metadata *pmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -971,7 +977,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
int r;
- unsigned open_devices = 0;
+ unsigned int open_devices = 0;
struct dm_thin_device *td, *tmp;
down_read(&pmd->root_lock);
@@ -1530,9 +1536,9 @@ static int __find_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t keys[2] = { td->id, block };
struct dm_btree_info *info;
- if (can_issue_io) {
+ if (can_issue_io)
info = &pmd->info;
- } else
+ else
info = &pmd->nb_info;
r = dm_btree_lookup(info, pmd->root, keys, &value);
@@ -1606,8 +1612,8 @@ static int __find_mapped_range(struct dm_thin_device *td,
if (r) {
if (r == -ENODATA)
break;
- else
- return r;
+
+ return r;
}
if ((lookup.block != pool_end) ||
@@ -1679,7 +1685,7 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
- unsigned count, total_count = 0;
+ unsigned int count, total_count = 0;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[1] = { td->id };
__le64 value;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 4d7a2caf21d9..2f64f48b5f19 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2010-2011 Red Hat, Inc.
*
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 64cfcf46881d..6cd105c1cef3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Red Hat UK.
*
@@ -32,7 +33,7 @@
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60
-static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
+static unsigned int no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -254,7 +255,7 @@ struct pool {
struct delayed_work no_space_timeout;
unsigned long last_commit_jiffies;
- unsigned ref_count;
+ unsigned int ref_count;
spinlock_t lock;
struct bio_list deferred_flush_bios;
@@ -293,7 +294,7 @@ static enum pool_mode get_pool_mode(struct pool *pool)
static void notify_of_pool_mode_change(struct pool *pool)
{
- const char *descs[] = {
+ static const char *descs[] = {
"write",
"out-of-data-space",
"read-only",
@@ -1037,6 +1038,7 @@ out:
static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+
if (m->cell)
cell_defer_no_holder(tc, m->cell);
mempool_free(m, &tc->pool->mapping_pool);
@@ -1180,9 +1182,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
discard_parent->bi_end_io = passdown_endio;
discard_parent->bi_private = m;
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
struct discard_op op;
begin_discard(&op, tc, discard_parent);
@@ -2159,7 +2161,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
- unsigned count = 0;
+ unsigned int count = 0;
if (tc->requeue_mode) {
error_thin_bio_list(tc, &tc->deferred_bio_list,
@@ -2207,6 +2209,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
throttle_work_update(&pool->throttle);
dm_pool_issue_prefetches(pool->pmd);
}
+ cond_resched();
}
blk_finish_plug(&plug);
}
@@ -2228,9 +2231,9 @@ static int cmp_cells(const void *lhs, const void *rhs)
return 0;
}
-static unsigned sort_cells(struct pool *pool, struct list_head *cells)
+static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
{
- unsigned count = 0;
+ unsigned int count = 0;
struct dm_bio_prison_cell *cell, *tmp;
list_for_each_entry_safe(cell, tmp, cells, user_list) {
@@ -2251,7 +2254,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
struct pool *pool = tc->pool;
struct list_head cells;
struct dm_bio_prison_cell *cell;
- unsigned i, j, count;
+ unsigned int i, j, count;
INIT_LIST_HEAD(&cells);
@@ -2289,6 +2292,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
else
pool->process_cell(tc, cell);
}
+ cond_resched();
} while (!list_empty(&cells));
}
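
Both deferred-I/O loops gain a cond_resched() because they run from a workqueue and can churn through long batches; yielding between items gives the scheduler a chance to run other tasks during a long pass. A loose userspace analogue, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdio.h>

int main(void)
{
    unsigned int i;

    for (i = 0; i < 1000; i++) {
        /* ... process one deferred bio or cell ... */
        sched_yield();      /* voluntary preemption point per iteration */
    }
    printf("batch done\n");
    return 0;
}
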
@@ -2411,6 +2415,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
+
wake_worker(pool);
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
@@ -2473,6 +2478,7 @@ static struct noflush_work *to_noflush(struct work_struct *ws)
static void do_noflush_start(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
+
w->tc->requeue_mode = true;
requeue_io(w->tc);
pool_work_complete(&w->pw);
@@ -2481,6 +2487,7 @@ static void do_noflush_start(struct work_struct *ws)
static void do_noflush_stop(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
+
w->tc->requeue_mode = false;
pool_work_complete(&w->pw);
}
@@ -2799,9 +2806,11 @@ static void requeue_bios(struct pool *pool)
rcu_read_unlock();
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Binding of control targets to a pool object
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
@@ -2865,9 +2874,11 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
pool->ti = NULL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Pool creation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
@@ -3091,9 +3102,11 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
return pool;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Pool target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void pool_dtr(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
@@ -3113,7 +3126,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -3234,6 +3247,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* delete after you've grown the device).
*/
dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+
return min((dm_block_t)1024ULL /* 4M */, quarter);
}
@@ -3250,7 +3264,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* read_only: Don't allow any changes to be made to the pool metadata.
* error_if_no_space: error IOs, instead of queueing, if no space.
*/
-static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r, pool_created = 0;
struct pool_c *pt;
@@ -3645,7 +3659,7 @@ static void pool_postsuspend(struct dm_target *ti)
(void) commit(pool);
}
-static int check_arg_count(unsigned argc, unsigned args_required)
+static int check_arg_count(unsigned int argc, unsigned int args_required)
{
if (argc != args_required) {
DMWARN("Message received with %u arguments instead of %u.",
@@ -3668,7 +3682,7 @@ static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
return -EINVAL;
}
-static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
int r;
@@ -3691,7 +3705,7 @@ static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *poo
return 0;
}
-static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
dm_thin_id origin_dev_id;
@@ -3719,7 +3733,7 @@ static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *poo
return 0;
}
-static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
int r;
@@ -3739,7 +3753,7 @@ static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
return r;
}
-static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id old_id, new_id;
int r;
@@ -3768,7 +3782,7 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
return 0;
}
-static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
int r;
@@ -3785,7 +3799,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
return r;
}
-static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
int r;
@@ -3809,8 +3823,8 @@ static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct
* reserve_metadata_snap
* release_metadata_snap
*/
-static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct pool_c *pt = ti->private;
@@ -3850,9 +3864,9 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
}
static void emit_flags(struct pool_features *pf, char *result,
- unsigned sz, unsigned maxlen)
+ unsigned int sz, unsigned int maxlen)
{
- unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
+ unsigned int count = !pf->zero_new_blocks + !pf->discard_enabled +
!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
pf->error_if_no_space;
DMEMIT("%u ", count);
@@ -3880,10 +3894,10 @@ static void emit_flags(struct pool_features *pf, char *result,
* <pool mode> <discard config> <no space config> <needs_check>
*/
static void pool_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
- unsigned sz = 0;
+ unsigned int sz = 0;
uint64_t transaction_id;
dm_block_t nr_free_blocks_data;
dm_block_t nr_free_blocks_metadata;
@@ -4095,9 +4109,11 @@ static struct target_type pool_target = {
.io_hints = pool_io_hints,
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Thin target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void thin_get(struct thin_c *tc)
{
refcount_inc(&tc->refcount);
@@ -4145,7 +4161,7 @@ static void thin_dtr(struct dm_target *ti)
* If the pool device has discards disabled, they get disabled for the thin
* device as well.
*/
-static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
struct thin_c *tc;
@@ -4367,7 +4383,7 @@ static int thin_preresume(struct dm_target *ti)
* <nr mapped sectors> <highest mapped sector>
*/
static void thin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
ssize_t sz = 0;
@@ -4518,7 +4534,7 @@ static void dm_thin_exit(void)
module_init(dm_thin_init);
module_exit(dm_thin_exit);
-module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 8671267200d8..491738263292 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -3,7 +3,7 @@
* Device Mapper Uevent Support (dm-uevent)
*
* Copyright IBM Corporation, 2007
- * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
+ * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#include <linux/list.h>
#include <linux/slab.h>
@@ -60,7 +60,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
enum kobject_action action,
const char *dm_action,
const char *path,
- unsigned nr_valid_paths)
+ unsigned int nr_valid_paths)
{
struct dm_uevent *event;
@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(dm_send_uevents);
*
*/
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
- const char *path, unsigned nr_valid_paths)
+ const char *path, unsigned int nr_valid_paths)
{
struct mapped_device *md = dm_table_get_md(ti->table);
struct dm_uevent *event;
diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
index d30d226f2a18..12a5d4fb7d44 100644
--- a/drivers/md/dm-uevent.h
+++ b/drivers/md/dm-uevent.h
@@ -3,7 +3,7 @@
* Device Mapper Uevent Support
*
* Copyright IBM Corporation, 2007
- * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
+ * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#ifndef DM_UEVENT_H
#define DM_UEVENT_H
@@ -20,7 +20,7 @@ extern void dm_uevent_exit(void);
extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
extern void dm_path_uevent(enum dm_uevent_type event_type,
struct dm_target *ti, const char *path,
- unsigned nr_valid_paths);
+ unsigned int nr_valid_paths);
#else
@@ -37,7 +37,7 @@ static inline void dm_send_uevents(struct list_head *events,
}
static inline void dm_path_uevent(enum dm_uevent_type event_type,
struct dm_target *ti, const char *path,
- unsigned nr_valid_paths)
+ unsigned int nr_valid_paths)
{
}
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index fdc8921e5c19..e7b7d5983a16 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Intel Corporation.
*
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 23cffce56403..962fc32c947c 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -59,14 +59,14 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned *offset, struct dm_buffer **buf)
+ unsigned int *offset, struct dm_buffer **buf)
{
u64 position, block, rem;
u8 *res;
position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->io_size, &rem);
- *offset = (unsigned)rem;
+ *offset = (unsigned int)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
if (IS_ERR(res)) {
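
fec_read_parity() locates a block's Reed-Solomon parity bytes by flat position: (index + rsb) * roots, which div64_u64_rem() splits into the parity block to read and the byte offset within it. The same arithmetic in a standalone sketch (sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t rsb = 1000, roots = 2, io_size = 4096;     /* assumed geometry */
    int index = 3;
    uint64_t position = (index + rsb) * roots;

    printf("block %llu, offset %u\n",
           (unsigned long long)(position / io_size),
           (unsigned int)(position % io_size));         /* block 0, offset 2006 */
    return 0;
}
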
@@ -102,7 +102,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
*/
static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
struct dm_verity_fec_io *fio,
- unsigned i, unsigned j)
+ unsigned int i, unsigned int j)
{
return &fio->bufs[i][j * v->fec->rsn];
}
@@ -111,7 +111,7 @@ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
* Return an index to the current RS block when called inside
* fec_for_each_buffer_rs_block.
*/
-static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
{
return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
}
@@ -121,12 +121,12 @@ static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
* starting from block_offset.
*/
static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u64 rsb, int byte_index, unsigned block_offset,
+ u64 rsb, int byte_index, unsigned int block_offset,
int neras)
{
int r, corrected = 0, res;
struct dm_buffer *buf;
- unsigned n, i, offset;
+ unsigned int n, i, offset;
u8 *par, *block;
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -197,7 +197,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
* fits into buffers. Check for erasure locations if @neras is non-NULL.
*/
static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
- u64 rsb, u64 target, unsigned block_offset,
+ u64 rsb, u64 target, unsigned int block_offset,
int *neras)
{
bool is_zero;
@@ -208,7 +208,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u64 block, ileaved;
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE];
- unsigned n, k;
+ unsigned int n, k;
if (neras)
*neras = 0;
@@ -304,7 +304,7 @@ done:
*/
static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
- unsigned n;
+ unsigned int n;
if (!fio->rs)
fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
@@ -344,7 +344,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
*/
static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
- unsigned n;
+ unsigned int n;
fec_for_each_buffer(fio, n)
memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
@@ -362,7 +362,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
bool use_erasures)
{
int r, neras = 0;
- unsigned pos;
+ unsigned int pos;
r = fec_alloc_bufs(v, fio);
if (unlikely(r < 0))
@@ -484,7 +484,7 @@ done:
*/
void verity_fec_finish_io(struct dm_verity_io *io)
{
- unsigned n;
+ unsigned int n;
struct dm_verity_fec *f = io->v->fec;
struct dm_verity_fec_io *fio = fec_io(io);
@@ -522,8 +522,8 @@ void verity_fec_init_io(struct dm_verity_io *io)
/*
* Append feature arguments and values to the status table.
*/
-unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
- char *result, unsigned maxlen)
+unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
+ char *result, unsigned int maxlen)
{
if (!verity_fec_is_enabled(v))
return sz;
@@ -589,7 +589,7 @@ bool verity_is_fec_opt_arg(const char *arg_name)
}
int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- unsigned *argc, const char *arg_name)
+ unsigned int *argc, const char *arg_name)
{
int r;
struct dm_target *ti = v->ti;
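
fec_read_parity() above locates a parity byte by linearizing (index + rsb) * roots and splitting that position into a dm-bufio block number plus an in-block offset via div64_u64_rem(). The same arithmetic in isolation, with roots and io_size standing in for v->fec->roots and v->fec->io_size (a sketch, not the driver code):

	#include <linux/math64.h>
	#include <linux/types.h>

	static u64 parity_block(u64 rsb, int index, u8 roots, u64 io_size,
				unsigned int *offset)
	{
		u64 position, block, rem;

		/* linear byte position of this block's parity bytes */
		position = (index + rsb) * roots;
		/* split into a bufio block number and the offset inside it */
		block = div64_u64_rem(position, io_size, &rem);
		*offset = (unsigned int)rem;

		return block;
	}
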
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 3c46c8d61883..8454070d2824 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -55,10 +55,10 @@ struct dm_verity_fec_io {
struct rs_control *rs; /* Reed-Solomon state */
int erasures[DM_VERITY_FEC_MAX_RSN]; /* erasures for decode_rs8 */
u8 *bufs[DM_VERITY_FEC_BUF_MAX]; /* bufs for deinterleaving */
- unsigned nbufs; /* number of buffers allocated */
+ unsigned int nbufs; /* number of buffers allocated */
u8 *output; /* buffer for corrected output */
size_t output_pos;
- unsigned level; /* recursion level */
+ unsigned int level; /* recursion level */
};
#ifdef CONFIG_DM_VERITY_FEC
@@ -72,15 +72,15 @@ extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
enum verity_block_type type, sector_t block,
u8 *dest, struct bvec_iter *iter);
-extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
- char *result, unsigned maxlen);
+extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
+ char *result, unsigned int maxlen);
extern void verity_fec_finish_io(struct dm_verity_io *io);
extern void verity_fec_init_io(struct dm_verity_io *io);
extern bool verity_is_fec_opt_arg(const char *arg_name);
extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
- struct dm_verity *v, unsigned *argc,
+ struct dm_verity *v, unsigned int *argc,
const char *arg_name);
extern void verity_fec_dtr(struct dm_verity *v);
@@ -106,9 +106,9 @@ static inline int verity_fec_decode(struct dm_verity *v,
return -EOPNOTSUPP;
}
-static inline unsigned verity_fec_status_table(struct dm_verity *v,
- unsigned sz, char *result,
- unsigned maxlen)
+static inline unsigned int verity_fec_status_table(struct dm_verity *v,
+ unsigned int sz, char *result,
+ unsigned int maxlen)
{
return sz;
}
@@ -128,7 +128,7 @@ static inline bool verity_is_fec_opt_arg(const char *arg_name)
static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
struct dm_verity *v,
- unsigned *argc,
+ unsigned int *argc,
const char *arg_name)
{
return -EINVAL;
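
For reference, the fec_buffer_rs_block()/fec_buffer_rs_index() helpers earlier in this file's diff are plain power-of-two indexing: each deinterleave buffer holds 1 << DM_VERITY_FEC_BUF_RS_BITS RS blocks, so buffer i, slot j flattens to one index and back. A sketch with an assumed (not verified) value for the shift constant:

	#define BUF_RS_BITS	4			/* stand-in for DM_VERITY_FEC_BUF_RS_BITS */
	#define RS_PER_BUF	(1U << BUF_RS_BITS)

	static inline unsigned int rs_index(unsigned int i, unsigned int j)
	{
		return (i << BUF_RS_BITS) + j;		/* i * RS_PER_BUF + j */
	}

	static inline unsigned int rs_buf(unsigned int index)
	{
		return index >> BUF_RS_BITS;
	}

	static inline unsigned int rs_slot(unsigned int index)
	{
		return index & (RS_PER_BUF - 1);
	}
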
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index ccf5b852fbf7..ade83ef3b439 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -41,9 +41,9 @@
#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
-static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
-module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
@@ -51,7 +51,7 @@ struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
sector_t block;
- unsigned n_blocks;
+ unsigned int n_blocks;
};
/*
@@ -110,22 +110,24 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
return crypto_wait_req(crypto_ahash_update(req), wait);
- } else {
- do {
- int r;
- size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
- flush_kernel_vmap_range((void *)data, this_step);
- sg_init_table(&sg, 1);
- sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
- ahash_request_set_crypt(req, &sg, NULL, this_step);
- r = crypto_wait_req(crypto_ahash_update(req), wait);
- if (unlikely(r))
- return r;
- data += this_step;
- len -= this_step;
- } while (len);
- return 0;
}
+
+ do {
+ int r;
+ size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+
+ flush_kernel_vmap_range((void *)data, this_step);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+ ahash_request_set_crypt(req, &sg, NULL, this_step);
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
+ if (unlikely(r))
+ return r;
+ data += this_step;
+ len -= this_step;
+ } while (len);
+
+ return 0;
}
/*
@@ -164,7 +166,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
- DMERR("verity_hash_final failed updating salt: %d", r);
+ DMERR("%s failed updating salt: %d", __func__, r);
goto out;
}
}
@@ -196,10 +198,10 @@ out:
}
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
- sector_t *hash_block, unsigned *offset)
+ sector_t *hash_block, unsigned int *offset)
{
sector_t position = verity_position_at_level(v, block, level);
- unsigned idx;
+ unsigned int idx;
*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
@@ -287,7 +289,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
u8 *data;
int r;
sector_t hash_block;
- unsigned offset;
+ unsigned int offset;
verity_hash_at_level(v, block, level, &hash_block, &offset);
@@ -332,10 +334,8 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
*/
r = -EAGAIN;
goto release_ret_r;
- }
- else if (verity_fec_decode(v, io,
- DM_VERITY_BLOCK_TYPE_METADATA,
- hash_block, data, NULL) == 0)
+ } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block, data, NULL) == 0)
aux->hash_verified = 1;
else if (verity_handle_err(v,
DM_VERITY_BLOCK_TYPE_METADATA,
@@ -424,7 +424,7 @@ static int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
- DMERR("verity_for_io_block crypto op failed: %d", r);
+ DMERR("%s crypto op failed: %d", __func__, r);
return r;
}
@@ -445,13 +445,13 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_io *io, u8 *data,
size_t len))
{
- unsigned todo = 1 << v->data_dev_block_bits;
+ unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
do {
int r;
u8 *page;
- unsigned len;
+ unsigned int len;
struct bio_vec bv = bio_iter_iovec(bio, *iter);
page = bvec_kmap_local(&bv);
@@ -685,10 +685,12 @@ static void verity_prefetch_io(struct work_struct *work)
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
+
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+
if (!i) {
- unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
+ unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
@@ -753,7 +755,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return DM_MAPIO_KILL;
@@ -789,12 +791,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
* Status: V (valid) or C (corruption found)
*/
static void verity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_verity *v = ti->private;
- unsigned args = 0;
- unsigned sz = 0;
- unsigned x;
+ unsigned int args = 0;
+ unsigned int sz = 0;
+ unsigned int x;
switch (type) {
case STATUSTYPE_INFO:
@@ -1054,7 +1056,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
bool only_modifier_opts)
{
int r = 0;
- unsigned argc;
+ unsigned int argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -1156,13 +1158,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
* <digest>
* <salt> Hex string or "-" if no salt.
*/
-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_verity *v;
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
- unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1370,6 +1371,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
hash_position = v->hash_start;
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
+
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
@@ -1399,8 +1401,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- /* WQ_UNBOUND greatly improves performance when running on ramdisk */
- wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
/*
* Using WQ_HIGHPRI improves throughput and completion latency by
* reducing wait times when reading from a dm-verity device.
@@ -1410,8 +1410,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
* will fall-back to using it for error handling (or if the bufio cache
* doesn't have required hashes).
*/
- wq_flags |= WQ_HIGHPRI;
- v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
+ v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
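
The constructor change above drops WQ_UNBOUND and the num_online_cpus() cap: verify_wq becomes a bound, high-priority workqueue with the default max_active (0). A minimal, self-contained sketch of that allocation pattern (a hypothetical module, not dm-verity itself):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *verify_wq;

	static void demo_fn(struct work_struct *w)
	{
		pr_info("ran on a bound, high-priority worker\n");
	}

	static DECLARE_WORK(demo_work, demo_fn);

	static int __init demo_init(void)
	{
		/*
		 * max_active == 0 selects the default limit; WQ_UNBOUND is
		 * deliberately absent, so work normally stays on the
		 * submitting CPU's worker pool.
		 */
		verify_wq = alloc_workqueue("demo_verify",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
		if (!verify_wq)
			return -ENOMEM;
		queue_work(verify_wq, &demo_work);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(verify_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

destroy_workqueue() drains any remaining work, so the exit path needs no explicit flush.
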
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index db61a1f43ae9..4836508ea50c 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 Microsoft Corporation.
*
diff --git a/drivers/md/dm-verity-verify-sig.h b/drivers/md/dm-verity-verify-sig.h
index 3987c7141f79..f36ea92127bf 100644
--- a/drivers/md/dm-verity-verify-sig.h
+++ b/drivers/md/dm-verity-verify-sig.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2019 Microsoft Corporation.
*
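
The verify-sig hunks above show both halves of the SPDX convention: the tag tightens from GPL-2.0 (now deprecated) to GPL-2.0-only, and the comment style depends on file type, a C++-style comment on line 1 of a .c file but a block comment in a header:

	// SPDX-License-Identifier: GPL-2.0-only	(first line of a .c file)

	/* SPDX-License-Identifier: GPL-2.0-only */	(first line of a .h file)
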
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 98f306ec6a33..2f555b420367 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -42,7 +42,7 @@ struct dm_verity {
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
u8 *zero_digest; /* digest for a zero block */
- unsigned salt_size;
+ unsigned int salt_size;
sector_t data_start; /* data offset in 512-byte sectors */
sector_t hash_start; /* hash start in blocks */
sector_t data_blocks; /* the number of data blocks */
@@ -54,10 +54,10 @@ struct dm_verity {
unsigned char version;
bool hash_failed:1; /* set if hash of any block failed */
bool use_tasklet:1; /* try to verify in tasklet before work-queue */
- unsigned digest_size; /* digest size for the current hash algorithm */
+ unsigned int digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
- unsigned corrupted_errs;/* Number of errors for corrupted blocks */
+ unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
struct workqueue_struct *verify_wq;
@@ -77,7 +77,7 @@ struct dm_verity_io {
bio_end_io_t *orig_bi_end_io;
sector_t block;
- unsigned n_blocks;
+ unsigned int n_blocks;
bool in_tasklet;
struct bvec_iter iter;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 96a003eb7323..3aa5874f0aef 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018 Red Hat. All rights reserved.
*
@@ -83,16 +83,13 @@ struct wc_entry {
struct rb_node rb_node;
struct list_head lru;
unsigned short wc_list_contiguous;
- bool write_in_progress
#if BITS_PER_LONG == 64
- :1
-#endif
- ;
- unsigned long index
-#if BITS_PER_LONG == 64
- :47
+ bool write_in_progress : 1;
+ unsigned long index : 47;
+#else
+ bool write_in_progress;
+ unsigned long index;
#endif
- ;
unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
uint64_t original_sector;
@@ -128,9 +125,9 @@ struct dm_writecache {
unsigned long max_age;
unsigned long pause;
- unsigned uncommitted_blocks;
- unsigned autocommit_blocks;
- unsigned max_writeback_jobs;
+ unsigned int uncommitted_blocks;
+ unsigned int autocommit_blocks;
+ unsigned int max_writeback_jobs;
int error;
@@ -155,7 +152,7 @@ struct dm_writecache {
sector_t data_device_sectors;
void *block_start;
struct wc_entry *entries;
- unsigned block_size;
+ unsigned int block_size;
unsigned char block_size_bits;
bool pmem_mode:1;
@@ -178,13 +175,13 @@ struct dm_writecache {
bool metadata_only:1;
bool pause_set:1;
- unsigned high_wm_percent_value;
- unsigned low_wm_percent_value;
- unsigned autocommit_time_value;
- unsigned max_age_value;
- unsigned pause_value;
+ unsigned int high_wm_percent_value;
+ unsigned int low_wm_percent_value;
+ unsigned int autocommit_time_value;
+ unsigned int max_age_value;
+ unsigned int pause_value;
- unsigned writeback_all;
+ unsigned int writeback_all;
struct workqueue_struct *writeback_wq;
struct work_struct writeback_work;
struct work_struct flush_work;
@@ -202,7 +199,7 @@ struct dm_writecache {
struct dm_kcopyd_client *dm_kcopyd;
unsigned long *dirty_bitmap;
- unsigned dirty_bitmap_size;
+ unsigned int dirty_bitmap_size;
struct bio_set bio_set;
mempool_t copy_pool;
@@ -227,7 +224,7 @@ struct writeback_struct {
struct list_head endio_entry;
struct dm_writecache *wc;
struct wc_entry **wc_list;
- unsigned wc_list_n;
+ unsigned int wc_list_n;
struct wc_entry *wc_list_inline[WB_LIST_INLINE];
struct bio bio;
};
@@ -236,7 +233,7 @@ struct copy_struct {
struct list_head endio_entry;
struct dm_writecache *wc;
struct wc_entry *e;
- unsigned n_entries;
+ unsigned int n_entries;
int error;
};
@@ -300,6 +297,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
}
if (da != p) {
long i;
+
wc->memory_map = NULL;
pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -309,6 +307,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
+
daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
p - i, DAX_ACCESS, NULL, &pfn);
if (daa <= 0) {
@@ -369,7 +368,7 @@ static struct page *persistent_memory_page(void *addr)
return virt_to_page(addr);
}
-static unsigned persistent_memory_page_offset(void *addr)
+static unsigned int persistent_memory_page_offset(void *addr)
{
return (unsigned long)addr & (PAGE_SIZE - 1);
}
@@ -502,11 +501,12 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
COMPLETION_INITIALIZER_ONSTACK(endio.c),
ATOMIC_INIT(1),
};
- unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
- unsigned i = 0;
+ unsigned int bitmap_bits = wc->dirty_bitmap_size * 8;
+ unsigned int i = 0;
while (1) {
- unsigned j;
+ unsigned int j;
+
i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
if (unlikely(i == bitmap_bits))
break;
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
req.notify.context = &endio;
/* writing via async dm-io (implied by notify.fn above) won't return an error */
- (void) dm_io(&req, 1, &region, NULL);
+ (void) dm_io(&req, 1, &region, NULL);
i = j;
}
@@ -623,20 +623,21 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
if (unlikely(!node)) {
if (!(flags & WFE_RETURN_FOLLOWING))
return NULL;
- if (read_original_sector(wc, e) >= block) {
+ if (read_original_sector(wc, e) >= block)
return e;
- } else {
- node = rb_next(&e->rb_node);
- if (unlikely(!node))
- return NULL;
- e = container_of(node, struct wc_entry, rb_node);
- return e;
- }
+
+ node = rb_next(&e->rb_node);
+ if (unlikely(!node))
+ return NULL;
+
+ e = container_of(node, struct wc_entry, rb_node);
+ return e;
}
}
while (1) {
struct wc_entry *e2;
+
if (flags & WFE_LOWEST_SEQ)
node = rb_prev(&e->rb_node);
else
@@ -679,6 +680,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
{
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
+
if (unlikely(!*node))
wc->current_free = e;
while (*node) {
@@ -718,6 +720,7 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node *next;
+
if (unlikely(!wc->current_free))
return NULL;
e = wc->current_free;
@@ -769,7 +772,7 @@ static void writecache_poison_lists(struct dm_writecache *wc)
/*
* Catch incorrect access to these values while the device is suspended.
*/
- memset(&wc->tree, -1, sizeof wc->tree);
+ memset(&wc->tree, -1, sizeof(wc->tree));
wc->lru.next = LIST_POISON1;
wc->lru.prev = LIST_POISON2;
wc->freelist.next = LIST_POISON1;
@@ -864,6 +867,7 @@ static void writecache_flush_work(struct work_struct *work)
static void writecache_autocommit_timer(struct timer_list *t)
{
struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
+
if (!writecache_has_error(wc))
queue_work(wc->writeback_wq, &wc->flush_work);
}
@@ -941,7 +945,8 @@ static void writecache_suspend(struct dm_target *ti)
wc_lock(wc);
if (flush_on_suspend)
wc->writeback_all--;
- while (writecache_wait_for_writeback(wc));
+ while (writecache_wait_for_writeback(wc))
+ ;
if (WC_MODE_PMEM(wc))
persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
@@ -962,6 +967,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
+
e->index = b;
e->write_in_progress = false;
cond_resched();
@@ -1005,6 +1011,7 @@ static void writecache_resume(struct dm_target *ti)
r = writecache_read_metadata(wc, wc->metadata_sectors);
if (r) {
size_t sb_entries_offset;
+
writecache_error(wc, r, "unable to read metadata: %d", r);
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
memset((char *)wc->memory_map + sb_entries_offset, -1,
@@ -1034,6 +1041,7 @@ static void writecache_resume(struct dm_target *ti)
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
struct wc_memory_entry wme;
+
if (writecache_has_error(wc)) {
e->original_sector = -1;
e->seq_count = -1;
@@ -1055,6 +1063,7 @@ static void writecache_resume(struct dm_target *ti)
#endif
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
+
if (!writecache_entry_is_committed(wc, e)) {
if (read_seq_count(wc, e) != -1) {
erase_this:
@@ -1100,7 +1109,7 @@ erase_this:
wc_unlock(wc);
}
-static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_flush_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1133,7 +1142,7 @@ static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *
return 0;
}
-static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_flush_on_suspend_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1153,7 +1162,7 @@ static void activate_cleaner(struct dm_writecache *wc)
wc->freelist_low_watermark = wc->n_blocks;
}
-static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_cleaner_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1167,20 +1176,20 @@ static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache
return 0;
}
-static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_clear_stats_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
wc_lock(wc);
- memset(&wc->stats, 0, sizeof wc->stats);
+ memset(&wc->stats, 0, sizeof(wc->stats));
wc_unlock(wc);
return 0;
}
-static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int writecache_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct dm_writecache *wc = ti->private;
@@ -1238,12 +1247,13 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
- unsigned size;
+ unsigned int size;
int rw = bio_data_dir(bio);
- unsigned remaining_size = wc->block_size;
+ unsigned int remaining_size = wc->block_size;
do {
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+
buf = bvec_kmap_local(&bv);
size = bv.bv_len;
if (unlikely(size > remaining_size))
@@ -1251,6 +1261,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
if (rw == READ) {
int r;
+
r = copy_mc_to_kernel(buf, data, size);
flush_dcache_page(bio_page(bio));
if (unlikely(r)) {
@@ -1371,13 +1382,14 @@ read_next_block:
static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e, bool search_used)
{
- unsigned bio_size = wc->block_size;
+ unsigned int bio_size = wc->block_size;
sector_t start_cache_sec = cache_sector(wc, e);
sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
while (bio_size < bio->bi_iter.bi_size) {
if (!search_used) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+
if (!f)
break;
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
@@ -1387,6 +1399,7 @@ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
} else {
struct wc_entry *f;
struct rb_node *next = rb_next(&e->rb_node);
+
if (!next)
break;
f = container_of(next, struct wc_entry, rb_node);
@@ -1427,6 +1440,7 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
do {
bool found_entry = false;
bool search_used = false;
+
if (writecache_has_error(wc)) {
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_ERROR;
@@ -1540,7 +1554,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
- if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
(wc->block_size / 512 - 1)) != 0)) {
DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
(unsigned long long)bio->bi_iter.bi_sector,
@@ -1605,6 +1619,7 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t
if (bio->bi_private == (void *)1) {
int dir = bio_data_dir(bio);
+
if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
wake_up(&wc->bio_in_progress_wait[dir]);
@@ -1666,7 +1681,7 @@ static void writecache_copy_endio(int read_err, unsigned long write_err, void *p
static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
- unsigned i;
+ unsigned int i;
struct writeback_struct *wb;
struct wc_entry *e;
unsigned long n_walked = 0;
@@ -1782,7 +1797,7 @@ pop_from_list:
static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
{
struct dm_writecache *wc = wb->wc;
- unsigned block_size = wc->block_size;
+ unsigned int block_size = wc->block_size;
void *address = memory_data(wc, e);
persistent_memory_flush_cache(address, block_size);
@@ -1817,7 +1832,7 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
struct wc_entry *e, *f;
struct bio *bio;
struct writeback_struct *wb;
- unsigned max_pages;
+ unsigned int max_pages;
while (wbl->size) {
wbl->size--;
@@ -1832,10 +1847,13 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
bio->bi_iter.bi_sector = read_original_sector(wc, e);
- if (max_pages <= WB_LIST_INLINE ||
- unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
- GFP_NOIO | __GFP_NORETRY |
- __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+
+ if (unlikely(max_pages > WB_LIST_INLINE))
+ wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+ GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (likely(max_pages <= WB_LIST_INLINE) || unlikely(!wb->wc_list)) {
wb->wc_list = wb->wc_list_inline;
max_pages = WB_LIST_INLINE;
}
@@ -1880,7 +1898,7 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
struct copy_struct *c;
while (wbl->size) {
- unsigned n_sectors;
+ unsigned int n_sectors;
wbl->size--;
e = container_of(wbl->list.prev, struct wc_entry, lru);
@@ -1940,6 +1958,7 @@ static void writecache_writeback(struct work_struct *work)
if (likely(wc->pause != 0)) {
while (1) {
unsigned long idle;
+
if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
unlikely(dm_suspended(wc->ti)))
break;
@@ -1965,9 +1984,8 @@ restart:
goto restart;
}
- if (wc->overwrote_committed) {
+ if (wc->overwrote_committed)
writecache_wait_for_ios(wc, WRITE);
- }
n_walked = 0;
INIT_LIST_HEAD(&skipped);
@@ -1996,9 +2014,9 @@ restart:
} else
e = container_of(wc->lru.prev, struct wc_entry, lru);
BUG_ON(e->write_in_progress);
- if (unlikely(!writecache_entry_is_committed(wc, e))) {
+ if (unlikely(!writecache_entry_is_committed(wc, e)))
writecache_flush(wc);
- }
+
node = rb_prev(&e->rb_node);
if (node) {
f = container_of(node, struct wc_entry, rb_node);
@@ -2087,12 +2105,13 @@ restart:
if (unlikely(wc->writeback_all)) {
wc_lock(wc);
- while (writecache_wait_for_writeback(wc));
+ while (writecache_wait_for_writeback(wc))
+ ;
wc_unlock(wc);
}
}
-static int calculate_memory_size(uint64_t device_size, unsigned block_size,
+static int calculate_memory_size(uint64_t device_size, unsigned int block_size,
size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
uint64_t n_blocks, offset;
@@ -2155,7 +2174,7 @@ static int init_memory(struct dm_writecache *wc)
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);
pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
- writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
+ writecache_flush_region(wc, &sb(wc)->magic, sizeof(sb(wc)->magic));
writecache_commit_flushed(wc, false);
return 0;
@@ -2207,12 +2226,12 @@ static void writecache_dtr(struct dm_target *ti)
kfree(wc);
}
-static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_writecache *wc;
struct dm_arg_set as;
const char *string;
- unsigned opt_params;
+ unsigned int opt_params;
size_t offset, data_size;
int i, r;
char dummy;
@@ -2384,6 +2403,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
string = dm_shift_arg(&as), opt_params--;
if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
unsigned long long start_sector;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
goto invalid_optional;
@@ -2419,7 +2439,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
wc->autocommit_blocks_set = true;
} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
- unsigned autocommit_msecs;
+ unsigned int autocommit_msecs;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
goto invalid_optional;
@@ -2429,7 +2450,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
wc->autocommit_time_value = autocommit_msecs;
wc->autocommit_time_set = true;
} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
- unsigned max_age_msecs;
+ unsigned int max_age_msecs;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
goto invalid_optional;
@@ -2445,16 +2467,19 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = true;
wc->writeback_fua_set = true;
- } else goto invalid_optional;
+ } else
+ goto invalid_optional;
} else if (!strcasecmp(string, "nofua")) {
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = false;
wc->writeback_fua_set = true;
- } else goto invalid_optional;
+ } else
+ goto invalid_optional;
} else if (!strcasecmp(string, "metadata_only")) {
wc->metadata_only = true;
} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
- unsigned pause_msecs;
+ unsigned int pause_msecs;
+
if (WC_MODE_PMEM(wc))
goto invalid_optional;
string = dm_shift_arg(&as), opt_params--;
@@ -2653,11 +2678,11 @@ bad:
}
static void writecache_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_writecache *wc = ti->private;
- unsigned extra_args;
- unsigned sz = 0;
+ unsigned int extra_args;
+ unsigned int sz = 0;
switch (type) {
case STATUSTYPE_INFO:
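
Most writecache hunks are spelling and whitespace, but the __writecache_writeback_pmem() change above alters control flow: the kmalloc_array() attempt is separated from the decision to fall back to the inline list. The ordering is load-bearing, since wb->wc_list is only assigned when the allocation branch ran; the size test must short-circuit ahead of the NULL test. The same shape in isolation (abbreviated names; a sketch, not the driver code):

	#include <linux/slab.h>

	#define LIST_INLINE 4	/* stand-in for WB_LIST_INLINE */

	struct wc_entry;

	struct wb_demo {
		struct wc_entry **wc_list;
		struct wc_entry *wc_list_inline[LIST_INLINE];
	};

	static void wb_pick_list(struct wb_demo *wb, unsigned int *max_pages)
	{
		if (unlikely(*max_pages > LIST_INLINE))
			wb->wc_list = kmalloc_array(*max_pages,
						    sizeof(struct wc_entry *),
						    GFP_NOIO | __GFP_NORETRY |
						    __GFP_NOMEMALLOC | __GFP_NOWARN);

		/*
		 * wb->wc_list is uninitialized unless the allocation above
		 * was attempted, so test the size before testing for NULL.
		 */
		if (likely(*max_pages <= LIST_INLINE) || unlikely(!wb->wc_list)) {
			wb->wc_list = wb->wc_list_inline;
			*max_pages = LIST_INLINE;
		}
	}
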
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index faa1dbffc8b4..2601cd520384 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3dafc0e8b7a9..4b82b7798ce4 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 0278482fac94..cf9402064aba 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1013,11 +1013,9 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
}
sb_block = le64_to_cpu(sb->sb_block);
- if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift ) {
- dmz_dev_err(dev, "Invalid superblock position "
- "(is %llu expected %llu)",
- sb_block,
- (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
+ dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
+ sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
return -EINVAL;
}
if (zmd->sb_version > 1) {
@@ -1030,16 +1028,14 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
} else if (uuid_is_null(&zmd->uuid)) {
uuid_copy(&zmd->uuid, &sb_uuid);
} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
- dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
- "is %pUl expected %pUl",
+ dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
&sb_uuid, &zmd->uuid);
return -ENXIO;
}
if (!strlen(zmd->label))
memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
- dmz_dev_err(dev, "mismatching DM-Zoned label, "
- "is %s expected %s",
+ dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
sb->dmz_label, zmd->label);
return -ENXIO;
}
@@ -1346,7 +1342,7 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
if (ret == -EINVAL)
goto out_kfree;
}
- out_kfree:
+out_kfree:
kfree(sb);
}
return ret;
@@ -1430,7 +1426,7 @@ static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
int idx;
sector_t zone_offset = 0;
- for(idx = 0; idx < dev->nr_zones; idx++) {
+ for (idx = 0; idx < dev->nr_zones; idx++) {
struct dm_zone *zone;
zone = dmz_insert(zmd, idx, dev);
@@ -1457,7 +1453,7 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
{
int idx;
- for(idx = 0; idx < zmd->nr_zones; idx++) {
+ for (idx = 0; idx < zmd->nr_zones; idx++) {
struct dm_zone *zone = xa_load(&zmd->zones, idx);
kfree(zone);
@@ -2945,7 +2941,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
MAJOR(dev->bdev->bd_dev),
MINOR(dev->bdev->bd_dev));
if (ret) {
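
register_shrinker() has taken a printf-style name since the shrinker debugfs work (the name appears under /sys/kernel/debug/shrinker/ with CONFIG_SHRINKER_DEBUG), and the hunk above makes dm-zoned register under a name that identifies it instead of the copy-pasted "md-meta". The registration pattern, sketched with do-nothing callbacks:

	#include <linux/kdev_t.h>
	#include <linux/shrinker.h>

	static unsigned long demo_count(struct shrinker *s,
					struct shrink_control *sc)
	{
		return 0;	/* nothing reclaimable in this sketch */
	}

	static unsigned long demo_scan(struct shrinker *s,
				       struct shrink_control *sc)
	{
		return SHRINK_STOP;
	}

	static struct shrinker demo_shrinker = {
		.count_objects	= demo_count,
		.scan_objects	= demo_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	static int demo_register(dev_t dev)
	{
		return register_shrinker(&demo_shrinker, "dm-zoned-meta:(%u:%u)",
					 MAJOR(dev), MINOR(dev));
	}
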
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 95b132b52f33..ad4764dcd013 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1119,7 +1119,6 @@ static void dmz_status(struct dm_target *ti, status_type_t type,
*result = '\0';
break;
}
- return;
}
static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b424a6ee27ba..eace45a18d45 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -49,8 +50,8 @@
static const char *_name = DM_NAME;
-static unsigned int major = 0;
-static unsigned int _major = 0;
+static unsigned int major;
+static unsigned int _major;
static DEFINE_IDR(_minor_idr);
@@ -83,7 +84,7 @@ struct clone_info {
struct bio *bio;
struct dm_io *io;
sector_t sector;
- unsigned sector_count;
+ unsigned int sector_count;
bool is_abnormal_io:1;
bool submit_as_polled:1;
};
@@ -104,6 +105,7 @@ EXPORT_SYMBOL_GPL(dm_per_bio_data);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
struct dm_io *io = (struct dm_io *)((char *)data + data_size);
+
if (io->magic == DM_IO_MAGIC)
return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
BUG_ON(io->magic != DM_TIO_MAGIC);
@@ -111,7 +113,7 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
-unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
@@ -127,6 +129,7 @@ static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
int latch = READ_ONCE(swap_bios);
+
if (unlikely(latch <= 0))
latch = DEFAULT_SWAP_BIOS;
return latch;
@@ -142,7 +145,7 @@ struct table_device {
* Bio-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_BIO_BASED_IOS 16
-static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
@@ -165,11 +168,10 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
return param;
}
-unsigned __dm_get_module_param(unsigned *module_param,
- unsigned def, unsigned max)
+unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{
- unsigned param = READ_ONCE(*module_param);
- unsigned modified_param = 0;
+ unsigned int param = READ_ONCE(*module_param);
+ unsigned int modified_param = 0;
if (!param)
modified_param = def;
@@ -184,14 +186,14 @@ unsigned __dm_get_module_param(unsigned *module_param,
return param;
}
-unsigned dm_get_reserved_bio_based_ios(void)
+unsigned int dm_get_reserved_bio_based_ios(void)
{
return __dm_get_module_param(&reserved_bio_based_ios,
RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
-static unsigned dm_get_numa_node(void)
+static unsigned int dm_get_numa_node(void)
{
return __dm_get_module_param_int(&dm_numa_node,
DM_NUMA_NODE, num_online_nodes() - 1);
@@ -231,7 +233,6 @@ out_uevent_exit:
static void local_exit(void)
{
- flush_scheduled_work();
destroy_workqueue(deferred_remove_workqueue);
unregister_blkdev(_major, _name);
@@ -435,7 +436,7 @@ retry:
r = ti->type->prepare_ioctl(ti, bdev);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
- msleep(10);
+ fsleep(10000);
goto retry;
}
@@ -604,7 +605,7 @@ static void free_io(struct dm_io *io)
}
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
+ unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{
struct mapped_device *md = ci->io->md;
struct dm_target_io *tio;
@@ -1008,6 +1009,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
io->next = NULL;
__dm_io_complete(io, false);
io = next;
+ cond_resched();
}
}
@@ -1115,6 +1117,7 @@ static void clone_endio(struct bio *bio)
if (endio) {
int r = endio(ti, bio, &error);
+
switch (r) {
case DM_ENDIO_REQUEUE:
if (static_branch_unlikely(&zoned_enabled)) {
@@ -1314,11 +1317,11 @@ out:
* the partially processed part (the sum of regions 1+2) must be the same for all
* copies of the bio.
*/
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
- unsigned bio_sectors = bio_sectors(bio);
+ unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
BUG_ON(op_is_zone_mgmt(bio_op(bio)));
@@ -1403,6 +1406,7 @@ static void __map_bio(struct bio *clone)
if (static_branch_unlikely(&swap_bios_enabled) &&
unlikely(swap_bios_limit(ti, clone))) {
int latch = get_swap_bios();
+
if (unlikely(latch != md->swap_bios))
__set_swap_bios_limit(md, latch);
down(&md->swap_bios_semaphore);
@@ -1447,7 +1451,7 @@ static void __map_bio(struct bio *clone)
}
}
-static void setup_split_accounting(struct clone_info *ci, unsigned len)
+static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{
struct dm_io *io = ci->io;
@@ -1463,7 +1467,7 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len)
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios)
+ struct dm_target *ti, unsigned int num_bios)
{
struct bio *bio;
int try;
@@ -1492,7 +1496,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned *len)
+ unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
@@ -1558,10 +1562,9 @@ static void __send_empty_flush(struct clone_info *ci)
}
static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios)
+ unsigned int num_bios)
{
- unsigned len;
- unsigned int bios;
+ unsigned int len, bios;
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
@@ -1599,7 +1602,7 @@ static bool is_abnormal_io(struct bio *bio)
static blk_status_t __process_abnormal_io(struct clone_info *ci,
struct dm_target *ti)
{
- unsigned num_bios = 0;
+ unsigned int num_bios = 0;
switch (bio_op(ci->bio)) {
case REQ_OP_DISCARD:
@@ -1677,7 +1680,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
struct bio *clone;
struct dm_target *ti;
- unsigned len;
+ unsigned int len;
ti = dm_table_find_target(ci->map, ci->sector);
if (unlikely(!ti))
@@ -1874,9 +1877,11 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
return 1;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
@@ -2139,7 +2144,7 @@ static void event_callback(void *context)
{
unsigned long flags;
LIST_HEAD(uevents);
- struct mapped_device *md = (struct mapped_device *) context;
+ struct mapped_device *md = context;
spin_lock_irqsave(&md->uevent_lock, flags);
list_splice_init(&md->uevent_list, &uevents);
@@ -2172,10 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- if (!get_capacity(md->disk))
- set_capacity(md->disk, size);
- else
- set_capacity_and_notify(md->disk, size);
+ set_capacity(md->disk, size);
dm_table_event_callback(t, event_callback, md);
@@ -2375,7 +2377,7 @@ out_undo_holders:
struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md;
- unsigned minor = MINOR(dev);
+ unsigned int minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
@@ -2457,7 +2459,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
- /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
mutex_unlock(&md->suspend_lock);
@@ -2469,7 +2471,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
*/
if (wait)
while (atomic_read(&md->holders))
- msleep(1);
+ fsleep(1000);
else if (atomic_read(&md->holders))
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
@@ -2546,7 +2548,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
break;
}
- msleep(5);
+ fsleep(5000);
}
return r;
@@ -2569,6 +2571,7 @@ static void dm_wq_work(struct work_struct *work)
break;
submit_bio_noacct(bio);
+ cond_resched();
}
}
@@ -2657,7 +2660,7 @@ static void unlock_fs(struct mapped_device *md)
* are being added to md->deferred list.
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, unsigned int task_state,
+ unsigned int suspend_flags, unsigned int task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
@@ -2764,7 +2767,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
*
* To abort suspend, start the request_queue.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
int r = 0;
@@ -2805,6 +2808,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
if (map) {
int r = dm_table_resume_targets(map);
+
if (r)
return r;
}
@@ -2866,7 +2870,7 @@ out:
* It may be used only from the kernel.
*/
-static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
+static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
@@ -2964,27 +2968,31 @@ done:
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Event notification.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie)
+ unsigned int cookie, bool need_resize_uevent)
{
int r;
- unsigned noio_flag;
+ unsigned int noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
- char *envp[] = { udev_cookie, NULL };
-
- noio_flag = memalloc_noio_save();
-
- if (!cookie)
- r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
- else {
+ char *envp[3] = { NULL, NULL, NULL };
+ char **envpp = envp;
+ if (cookie) {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
- action, envp);
+ *envpp++ = udev_cookie;
}
+ if (need_resize_uevent) {
+ *envpp++ = "RESIZE=1";
+ }
+
+ noio_flag = memalloc_noio_save();
+
+ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
memalloc_noio_restore(noio_flag);
@@ -3382,13 +3390,13 @@ module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
-module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
-module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
-module_param(swap_bios, int, S_IRUGO | S_IWUSR);
+module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
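
Several dm.c hunks above swap msleep() for fsleep(). msleep() is jiffy-granular, so a nominal 10ms sleep can take noticeably longer at low HZ; fsleep() takes microseconds and picks udelay(), usleep_range() or msleep() to suit the duration. The substitution in miniature:

	#include <linux/delay.h>

	static void retry_backoff(void)
	{
		fsleep(10000);	/* 10ms expressed in microseconds; was msleep(10) */
	}
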
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5201df03ce40..22eaed188907 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -48,9 +48,11 @@ struct dm_md_mempools;
struct dm_target_io;
struct dm_io;
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Internal table functions.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
@@ -119,9 +121,11 @@ static inline int dm_zone_map_bio(struct dm_target_io *tio)
}
#endif
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* A registry of target types.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
@@ -203,7 +207,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie);
+ unsigned int cookie, bool need_resize_uevent);
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
@@ -222,6 +226,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
* Various helpers
*/
-unsigned dm_get_reserved_bio_based_ios(void);
+unsigned int dm_get_reserved_bio_based_ios(void);
#endif
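
The dm_kobject_uevent() prototype above grew need_resize_uevent so a capacity change can ride on the same KOBJ_CHANGE event as the udev cookie rather than being sent separately. The envp-cursor construction from the dm.c hunk, condensed into a sketch (an illustrative wrapper, not the dm function):

	#include <linux/kobject.h>

	static int send_change_uevent(struct kobject *kobj, char *udev_cookie,
				      bool need_resize_uevent)
	{
		char *envp[3] = { NULL, NULL, NULL };
		char **envpp = envp;

		if (udev_cookie)
			*envpp++ = udev_cookie;
		if (need_resize_uevent)
			*envpp++ = "RESIZE=1";

		/* one event carries both variables; envp stays NULL-terminated */
		return kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
	}
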
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 3a963d783a86..798c9c53a343 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -57,7 +58,7 @@ static int array_block_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
- DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
(unsigned long long) le64_to_cpu(bh_le->blocknr),
(unsigned long long) dm_block_location(b));
return -ENOTBLK;
@@ -67,9 +68,9 @@ static int array_block_check(struct dm_block_validator *v,
size_of_block - sizeof(__le32),
CSUM_XOR));
if (csum_disk != bh_le->csum) {
- DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
- (unsigned) le32_to_cpu(csum_disk),
- (unsigned) le32_to_cpu(bh_le->csum));
+ DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
+ (unsigned int) le32_to_cpu(csum_disk),
+ (unsigned int) le32_to_cpu(bh_le->csum));
return -EILSEQ;
}
@@ -94,7 +95,7 @@ static struct dm_block_validator array_validator = {
* index - The index into _this_ specific block.
*/
static void *element_at(struct dm_array_info *info, struct array_block *ab,
- unsigned index)
+ unsigned int index)
{
unsigned char *entry = (unsigned char *) (ab + 1);
@@ -108,9 +109,10 @@ static void *element_at(struct dm_array_info *info, struct array_block *ab,
* in an array block.
*/
static void on_entries(struct dm_array_info *info, struct array_block *ab,
- void (*fn)(void *, const void *, unsigned))
+ void (*fn)(void *, const void *, unsigned int))
{
- unsigned nr_entries = le32_to_cpu(ab->nr_entries);
+ unsigned int nr_entries = le32_to_cpu(ab->nr_entries);
+
fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
}
@@ -171,7 +173,7 @@ static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
* the current number of entries.
*/
static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
- const void *value, unsigned new_nr)
+ const void *value, unsigned int new_nr)
{
uint32_t nr_entries, delta, i;
struct dm_btree_value_type *vt = &info->value_type;
@@ -194,7 +196,7 @@ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
* entries.
*/
static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
- unsigned new_nr)
+ unsigned int new_nr)
{
uint32_t nr_entries, delta;
struct dm_btree_value_type *vt = &info->value_type;
@@ -247,7 +249,7 @@ static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
* / max_entries).
*/
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
- unsigned index, struct dm_block **block,
+ unsigned int index, struct dm_block **block,
struct array_block **ab)
{
int r;
@@ -295,7 +297,7 @@ static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
* The shadow op will often be a noop. Only insert if it really
* copied data.
*/
-static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+static int __reinsert_ablock(struct dm_array_info *info, unsigned int index,
struct dm_block *block, dm_block_t b,
dm_block_t *root)
{
@@ -321,7 +323,7 @@ static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
* for both the current root block, and the new one.
*/
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
- unsigned index, struct dm_block **block,
+ unsigned int index, struct dm_block **block,
struct array_block **ab)
{
int r;
@@ -346,7 +348,7 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
*/
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
uint32_t max_entries,
- unsigned block_index, uint32_t nr,
+ unsigned int block_index, uint32_t nr,
const void *value, dm_block_t *root)
{
int r;
@@ -365,8 +367,8 @@ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
}
static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
- unsigned begin_block, unsigned end_block,
- unsigned max_entries, const void *value,
+ unsigned int begin_block, unsigned int end_block,
+ unsigned int max_entries, const void *value,
dm_block_t *root)
{
int r = 0;
@@ -402,20 +404,20 @@ struct resize {
/*
* Maximum nr entries in an array block.
*/
- unsigned max_entries;
+ unsigned int max_entries;
/*
* nr of completely full blocks in the array.
*
* 'old' refers to before the resize, 'new' after.
*/
- unsigned old_nr_full_blocks, new_nr_full_blocks;
+ unsigned int old_nr_full_blocks, new_nr_full_blocks;
/*
* Number of entries in the final block. 0 iff only full blocks in
* the array.
*/
- unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+ unsigned int old_nr_entries_in_last_block, new_nr_entries_in_last_block;
/*
* The default value used when growing the array.
@@ -430,13 +432,14 @@ struct resize {
* begin_index - the index of the first array block to remove.
* end_index - the one-past-the-end value. ie. this block is not removed.
*/
-static int drop_blocks(struct resize *resize, unsigned begin_index,
- unsigned end_index)
+static int drop_blocks(struct resize *resize, unsigned int begin_index,
+ unsigned int end_index)
{
int r;
while (begin_index != end_index) {
uint64_t key = begin_index++;
+
r = dm_btree_remove(&resize->info->btree_info, resize->root,
&key, &resize->root);
if (r)
@@ -449,8 +452,8 @@ static int drop_blocks(struct resize *resize, unsigned begin_index,
/*
* Calculates how many blocks are needed for the array.
*/
-static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
- unsigned nr_entries_in_last_block)
+static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
+ unsigned int nr_entries_in_last_block)
{
return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}
@@ -461,7 +464,7 @@ static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
static int shrink(struct resize *resize)
{
int r;
- unsigned begin, end;
+ unsigned int begin, end;
struct dm_block *block;
struct array_block *ab;
@@ -527,7 +530,7 @@ static int grow_add_tail_block(struct resize *resize)
static int grow_needs_more_blocks(struct resize *resize)
{
int r;
- unsigned old_nr_blocks = resize->old_nr_full_blocks;
+ unsigned int old_nr_blocks = resize->old_nr_full_blocks;
if (resize->old_nr_entries_in_last_block > 0) {
old_nr_blocks++;
@@ -569,11 +572,11 @@ static int grow(struct resize *resize)
* These are the value_type functions for the btree elements, which point
* to array blocks.
*/
-static void block_inc(void *context, const void *value, unsigned count)
+static void block_inc(void *context, const void *value, unsigned int count)
{
const __le64 *block_le = value;
struct dm_array_info *info = context;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, block_le++)
dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
@@ -618,9 +621,10 @@ static void __block_dec(void *context, const void *value)
dm_tm_dec(info->btree_info.tm, b);
}
-static void block_dec(void *context, const void *value, unsigned count)
+static void block_dec(void *context, const void *value, unsigned int count)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < count; i++, value += sizeof(__le64))
__block_dec(context, value);
}
@@ -691,19 +695,21 @@ static int array_resize(struct dm_array_info *info, dm_block_t root,
int dm_array_resize(struct dm_array_info *info, dm_block_t root,
uint32_t old_size, uint32_t new_size,
const void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
int r = array_resize(info, root, old_size, new_size, value, new_root);
+
__dm_unbless_for_disk(value);
return r;
}
EXPORT_SYMBOL_GPL(dm_array_resize);
static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
- value_fn fn, void *context, unsigned base, unsigned new_nr)
+ value_fn fn, void *context,
+ unsigned int base, unsigned int new_nr)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
@@ -728,7 +734,7 @@ int dm_array_new(struct dm_array_info *info, dm_block_t *root,
int r;
struct dm_block *block;
struct array_block *ab;
- unsigned block_index, end_block, size_of_block, max_entries;
+ unsigned int block_index, end_block, size_of_block, max_entries;
r = dm_array_empty(info, root);
if (r)
@@ -776,7 +782,7 @@ int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
struct dm_block *block;
struct array_block *ab;
size_t size_of_block;
- unsigned entry, max_entries;
+ unsigned int entry, max_entries;
size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
max_entries = calc_max_entries(info->value_type.size, size_of_block);
@@ -804,8 +810,8 @@ static int array_set_value(struct dm_array_info *info, dm_block_t root,
struct dm_block *block;
struct array_block *ab;
size_t size_of_block;
- unsigned max_entries;
- unsigned entry;
+ unsigned int max_entries;
+ unsigned int entry;
void *old_value;
struct dm_btree_value_type *vt = &info->value_type;
@@ -840,7 +846,7 @@ out:
int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
uint32_t index, const void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
int r;
@@ -861,9 +867,9 @@ static int walk_ablock(void *context, uint64_t *keys, void *leaf)
struct walk_info *wi = context;
int r;
- unsigned i;
+ unsigned int i;
__le64 block_le;
- unsigned nr_entries, max_entries;
+ unsigned int nr_entries, max_entries;
struct dm_block *block;
struct array_block *ab;
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
index d7d2d579c662..91d6165427b3 100644
--- a/drivers/md/persistent-data/dm-array.h
+++ b/drivers/md/persistent-data/dm-array.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -198,7 +199,7 @@ struct dm_array_cursor {
struct dm_block *block;
struct array_block *ab;
- unsigned index;
+ unsigned int index;
};
int dm_array_cursor_begin(struct dm_array_info *info,
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
index b7208d82e748..00c0a3f186b7 100644
--- a/drivers/md/persistent-data/dm-bitset.c
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dm_bitset_empty);
struct packer_context {
bit_value_fn fn;
- unsigned nr_bits;
+ unsigned int nr_bits;
void *context;
};
@@ -49,7 +50,7 @@ static int pack_bits(uint32_t index, void *value, void *context)
{
int r;
struct packer_context *p = context;
- unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
+ unsigned int bit, nr = min(64u, p->nr_bits - (index * 64));
uint64_t word = 0;
bool bv;
@@ -73,6 +74,7 @@ int dm_bitset_new(struct dm_disk_bitset *info, dm_block_t *root,
uint32_t size, bit_value_fn fn, void *context)
{
struct packer_context p;
+
p.fn = fn;
p.nr_bits = size;
p.context = context;
@@ -147,7 +149,7 @@ static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+ unsigned int array_index = index / BITS_PER_ARRAY_ENTRY;
if (info->current_index_set) {
if (info->current_index == array_index)
@@ -165,7 +167,7 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
@@ -182,7 +184,7 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
@@ -199,7 +201,7 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root, bool *result)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
index df888da04ee1..a3392ece5fab 100644
--- a/drivers/md/persistent-data/dm-bitset.h
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 11935864f50f..7bdfc23f758a 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -57,10 +58,10 @@ struct waiter {
int wants_write;
};
-static unsigned __find_holder(struct block_lock *lock,
+static unsigned int __find_holder(struct block_lock *lock,
struct task_struct *task)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < MAX_HOLDERS; i++)
if (lock->holders[i] == task)
@@ -73,7 +74,7 @@ static unsigned __find_holder(struct block_lock *lock,
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
- unsigned h = __find_holder(lock, NULL);
+ unsigned int h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_store *t;
#endif
@@ -90,14 +91,15 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
- unsigned h = __find_holder(lock, task);
+ unsigned int h = __find_holder(lock, task);
+
lock->holders[h] = NULL;
put_task_struct(task);
}
static int __check_holder(struct block_lock *lock)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < MAX_HOLDERS; i++) {
if (lock->holders[i] == current) {
@@ -354,6 +356,7 @@ struct buffer_aux {
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
aux->validator = NULL;
bl_init(&aux->lock);
}
@@ -361,23 +364,26 @@ static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
if (aux->validator) {
aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
dm_bufio_get_block_size(dm_bufio_get_client(buf)));
}
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
* Public interface
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dm_block_manager {
struct dm_bufio_client *bufio;
bool read_only:1;
};
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
- unsigned block_size,
- unsigned max_held_per_thread)
+ unsigned int block_size,
+ unsigned int max_held_per_thread)
{
int r;
struct dm_block_manager *bm;
@@ -415,7 +421,7 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
-unsigned dm_bm_block_size(struct dm_block_manager *bm)
+unsigned int dm_bm_block_size(struct dm_block_manager *bm)
{
return dm_bufio_get_block_size(bm->bufio);
}
@@ -433,6 +439,7 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
{
if (unlikely(!aux->validator)) {
int r;
+
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
@@ -588,8 +595,7 @@ EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
void dm_bm_unlock(struct dm_block *b)
{
- struct buffer_aux *aux;
- aux = dm_bufio_get_aux_data(to_buffer(b));
+ struct buffer_aux *aux = dm_bufio_get_aux_data(to_buffer(b));
if (aux->write_locked) {
dm_bufio_mark_buffer_dirty(to_buffer(b));
@@ -617,7 +623,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
- return (bm ? bm->read_only : true);
+ return bm ? bm->read_only : true;
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index e728937f376a..5746b0f82a03 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -32,11 +33,11 @@ void *dm_block_data(struct dm_block *b);
*/
struct dm_block_manager;
struct dm_block_manager *dm_block_manager_create(
- struct block_device *bdev, unsigned block_size,
- unsigned max_held_per_thread);
+ struct block_device *bdev, unsigned int block_size,
+ unsigned int max_held_per_thread);
void dm_block_manager_destroy(struct dm_block_manager *bm);
-unsigned dm_bm_block_size(struct dm_block_manager *bm);
+unsigned int dm_bm_block_size(struct dm_block_manager *bm);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 893edb426dba..7ed2ce656fcc 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -34,12 +35,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct btree_node {
struct node_header header;
__le64 keys[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
@@ -118,6 +119,7 @@ static inline void *value_base(struct btree_node *n)
static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
+
return value_base(n) + (value_size * index);
}
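A self-contained sketch of the value layout implied by value_ptr() above (buffer size and values are illustrative): values are stored as a packed array after the keys, so entry i sits at value_base + i * value_size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Packed value array: entry i lives at base + i * value_size. */
static void *value_ptr(void *value_base, uint32_t value_size, uint32_t index)
{
	return (char *)value_base + (size_t)value_size * index;
}

int main(void)
{
	char base[8 * sizeof(uint32_t)];
	uint32_t in = 42, out = 0;

	memcpy(value_ptr(base, sizeof(in), 3), &in, sizeof(in));
	memcpy(&out, value_ptr(base, sizeof(out), 3), sizeof(out));
	printf("%u\n", out); /* 42 */
	return 0;
}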
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 4ead31e0d8ce..942cd47eb52d 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -86,6 +87,7 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
+
if (value_size != le32_to_cpu(right->header.value_size)) {
DMERR("mismatched value size");
return -EILSEQ;
@@ -124,11 +126,12 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
/*
* Delete a specific entry from a leaf node.
*/
-static void delete_at(struct btree_node *n, unsigned index)
+static void delete_at(struct btree_node *n, unsigned int index)
{
- unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
- unsigned nr_to_copy = nr_entries - (index + 1);
+ unsigned int nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned int nr_to_copy = nr_entries - (index + 1);
uint32_t value_size = le32_to_cpu(n->header.value_size);
+
BUG_ON(index >= nr_entries);
if (nr_to_copy) {
@@ -144,20 +147,20 @@ static void delete_at(struct btree_node *n, unsigned index)
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned merge_threshold(struct btree_node *n)
+static unsigned int merge_threshold(struct btree_node *n)
{
return le32_to_cpu(n->header.max_entries) / 3;
}
struct child {
- unsigned index;
+ unsigned int index;
struct dm_block *block;
struct btree_node *n;
};
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
struct btree_node *parent,
- unsigned index, struct child *result)
+ unsigned int index, struct child *result)
{
int r, inc;
dm_block_t root;
@@ -263,7 +266,8 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
/*
* Rebalance.
*/
- unsigned target_left = (nr_left + nr_right) / 2;
+ unsigned int target_left = (nr_left + nr_right) / 2;
+
ret = shift(left, right, nr_left - target_left);
if (ret)
return ret;
@@ -273,7 +277,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
- struct dm_btree_value_type *vt, unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned int left_index)
{
int r;
struct btree_node *parent;
@@ -310,7 +314,7 @@ static int delete_center_node(struct dm_btree_info *info, struct btree_node *par
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned shift = min(max_entries - nr_left, nr_center);
+ unsigned int shift = min(max_entries - nr_left, nr_center);
if (nr_left + shift > max_entries) {
DMERR("node shift out of bounds");
@@ -351,10 +355,10 @@ static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
{
int s, ret;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned total = nr_left + nr_center + nr_right;
- unsigned target_right = total / 3;
- unsigned remainder = (target_right * 3) != total;
- unsigned target_left = target_right + remainder;
+ unsigned int total = nr_left + nr_center + nr_right;
+ unsigned int target_right = total / 3;
+ unsigned int remainder = (target_right * 3) != total;
+ unsigned int target_left = target_right + remainder;
BUG_ON(target_left > max_entries);
BUG_ON(target_right > max_entries);
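A quick worked example of the three-way targets computed above (entry counts are illustrative): 160 entries split as 54/53/53, with the remainder biased to the left node.

#include <stdio.h>

int main(void)
{
	unsigned int nr_left = 100, nr_center = 20, nr_right = 40;
	unsigned int total = nr_left + nr_center + nr_right;
	unsigned int target_right = total / 3;
	unsigned int remainder = (target_right * 3) != total;
	unsigned int target_left = target_right + remainder;

	/* Same arithmetic as redistribute3() above. */
	printf("left %u, center %u, right %u\n",
	       target_left, total - target_left - target_right, target_right);
	/* left 54, center 53, right 53 */
	return 0;
}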
@@ -422,7 +426,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned threshold = merge_threshold(left) * 4 + 1;
+ unsigned int threshold = merge_threshold(left) * 4 + 1;
if ((left->header.max_entries != center->header.max_entries) ||
(center->header.max_entries != right->header.max_entries)) {
@@ -440,7 +444,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
- struct dm_btree_value_type *vt, unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned int left_index)
{
int r;
struct btree_node *parent = dm_block_data(shadow_current(s));
@@ -519,7 +523,7 @@ static int rebalance_children(struct shadow_spine *s,
return r;
}
-static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+static int do_leaf(struct btree_node *n, uint64_t key, unsigned int *index)
{
int i = lower_bound(n, key);
@@ -539,7 +543,7 @@ static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
*/
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
struct dm_btree_value_type *vt, dm_block_t root,
- uint64_t key, unsigned *index)
+ uint64_t key, unsigned int *index)
{
int i = *index, r;
struct btree_node *n;
@@ -556,6 +560,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -589,7 +594,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
@@ -601,7 +606,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
r = remove_raw(&spine, info,
(level == last_level ?
&info->value_type : &le64_vt),
- root, keys[level], (unsigned *)&index);
+ root, keys[level], (unsigned int *)&index);
if (r < 0)
break;
@@ -649,6 +654,7 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -685,9 +691,9 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
static int remove_one(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed)
+ dm_block_t *new_root, unsigned int *nr_removed)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
@@ -698,7 +704,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
r = remove_raw(&spine, info, &le64_vt,
- root, keys[level], (unsigned *) &index);
+ root, keys[level], (unsigned int *) &index);
if (r < 0)
goto out;
@@ -742,7 +748,7 @@ out:
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
uint64_t *first_key, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed)
+ dm_block_t *new_root, unsigned int *nr_removed)
{
int r;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index e653458888a7..7540383b7cf3 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -39,7 +40,7 @@ static int node_check(struct dm_block_validator *v,
uint32_t flags, nr_entries, max_entries;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
- DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
le64_to_cpu(h->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -48,7 +49,7 @@ static int node_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
if (csum_disk != h->csum) {
- DMERR_LIMIT("node_check failed: csum %u != wanted %u",
+ DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
return -EILSEQ;
}
@@ -59,12 +60,12 @@ static int node_check(struct dm_block_validator *v,
if (sizeof(struct node_header) +
(sizeof(__le64) + value_size) * max_entries > block_size) {
- DMERR_LIMIT("node_check failed: max_entries too large");
+ DMERR_LIMIT("%s failed: max_entries too large", __func__);
return -EILSEQ;
}
if (nr_entries > max_entries) {
- DMERR_LIMIT("node_check failed: too many entries");
+ DMERR_LIMIT("%s failed: too many entries", __func__);
return -EILSEQ;
}
@@ -73,7 +74,7 @@ static int node_check(struct dm_block_validator *v,
*/
flags = le32_to_cpu(h->flags);
if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
- DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
+ DMERR_LIMIT("%s failed: node is neither INTERNAL or LEAF", __func__);
return -EILSEQ;
}
@@ -132,9 +133,8 @@ void exit_ro_spine(struct ro_spine *s)
{
int i;
- for (i = 0; i < s->count; i++) {
+ for (i = 0; i < s->count; i++)
unlock_block(s->info, s->nodes[i]);
- }
}
int ro_step(struct ro_spine *s, dm_block_t new_child)
@@ -183,9 +183,8 @@ void exit_shadow_spine(struct shadow_spine *s)
{
int i;
- for (i = 0; i < s->count; i++) {
+ for (i = 0; i < s->count; i++)
unlock_block(s->info, s->nodes[i]);
- }
}
int shadow_step(struct shadow_spine *s, dm_block_t b,
@@ -234,12 +233,12 @@ dm_block_t shadow_root(struct shadow_spine *s)
return s->root;
}
-static void le64_inc(void *context, const void *value_le, unsigned count)
+static void le64_inc(void *context, const void *value_le, unsigned int count)
{
dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}
-static void le64_dec(void *context, const void *value_le, unsigned count)
+static void le64_dec(void *context, const void *value_le, unsigned int count)
{
dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 5ce64e93aae7..0c7a2e8d1846 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -13,9 +14,11 @@
#define DM_MSG_PREFIX "btree"
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Array manipulation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_written_to_disk(src)
{
@@ -23,8 +26,8 @@ static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_unbless_for_disk(src);
}
-static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
- unsigned index, void *elt)
+static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
+ unsigned int index, void *elt)
__dm_written_to_disk(elt)
{
if (index < nr_elts)
@@ -80,7 +83,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
vt->inc(vt->context, value_ptr(n, 0), nr_entries);
}
-static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -162,9 +165,9 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
struct frame {
struct dm_block *b;
struct btree_node *n;
- unsigned level;
- unsigned nr_children;
- unsigned current_child;
+ unsigned int level;
+ unsigned int nr_children;
+ unsigned int current_child;
};
struct del_stack {
@@ -193,7 +196,7 @@ static int unprocessed_frames(struct del_stack *s)
static void prefetch_children(struct del_stack *s, struct frame *f)
{
- unsigned i;
+ unsigned int i;
struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
for (i = 0; i < f->nr_children; i++)
@@ -205,7 +208,7 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
return f->level < (info->levels - 1);
}
-static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
{
int r;
uint32_t ref_count;
@@ -371,7 +374,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value_le)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int r = -ENODATA;
uint64_t rkey;
__le64 internal_value_le;
@@ -467,7 +470,7 @@ out:
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t *rkey, void *value_le)
{
- unsigned level;
+ unsigned int level;
int r = -ENODATA;
__le64 internal_value_le;
struct ro_spine spine;
@@ -493,7 +496,6 @@ out:
exit_ro_spine(&spine);
return r;
}
-
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
/*----------------------------------------------------------------*/
@@ -502,11 +504,12 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
* Copies entries from one region of a btree node to another. The regions
* must not overlap.
*/
-static void copy_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
@@ -515,11 +518,12 @@ static void copy_entries(struct btree_node *dest, unsigned dest_offset,
 * Moves entries from one region of a btree node to another. The regions
* may overlap.
*/
-static void move_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void move_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
@@ -528,7 +532,7 @@ static void move_entries(struct btree_node *dest, unsigned dest_offset,
* Erases the first 'count' entries of a btree node, shifting following
* entries down into their place.
*/
-static void shift_down(struct btree_node *n, unsigned count)
+static void shift_down(struct btree_node *n, unsigned int count)
{
move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
}
@@ -537,7 +541,7 @@ static void shift_down(struct btree_node *n, unsigned count)
* Moves entries in a btree node up 'count' places, making space for
* new entries at the start of the node.
*/
-static void shift_up(struct btree_node *n, unsigned count)
+static void shift_up(struct btree_node *n, unsigned int count)
{
move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
}
@@ -548,18 +552,20 @@ static void shift_up(struct btree_node *n, unsigned count)
*/
static void redistribute2(struct btree_node *left, struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total = nr_left + nr_right;
- unsigned target_left = total / 2;
- unsigned target_right = total - target_left;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total = nr_left + nr_right;
+ unsigned int target_left = total / 2;
+ unsigned int target_right = total - target_left;
if (nr_left < target_left) {
- unsigned delta = target_left - nr_left;
+ unsigned int delta = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, delta);
shift_down(right, delta);
} else if (nr_left > target_left) {
- unsigned delta = nr_left - target_left;
+ unsigned int delta = nr_left - target_left;
+
if (nr_right)
shift_up(right, delta);
copy_entries(right, 0, left, target_left, delta);
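For the two-way case, a worked sketch (counts are illustrative): 10 left / 2 right entries rebalance to 6/6 by shifting the right node's entries up and copying the top 4 left entries into the freed space.

#include <stdio.h>

int main(void)
{
	unsigned int nr_left = 10, nr_right = 2;
	unsigned int total = nr_left + nr_right;
	unsigned int target_left = total / 2;
	unsigned int target_right = total - target_left;
	unsigned int delta = nr_left - target_left;

	/* Same arithmetic as redistribute2() above: left sheds 'delta'
	 * entries; right shifts its own entries up to make room. */
	printf("targets %u/%u, move %u left->right\n",
	       target_left, target_right, delta);
	/* targets 6/6, move 4 left->right */
	return 0;
}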
@@ -576,10 +582,10 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
static void redistribute3(struct btree_node *left, struct btree_node *center,
struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_center = le32_to_cpu(center->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total, target_left, target_center, target_right;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total, target_left, target_center, target_right;
BUG_ON(nr_center);
@@ -589,19 +595,22 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
target_right = (total - target_left - target_center);
if (nr_left < target_left) {
- unsigned left_short = target_left - nr_left;
+ unsigned int left_short = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, left_short);
copy_entries(center, 0, right, left_short, target_center);
shift_down(right, nr_right - target_right);
} else if (nr_left < (target_left + target_center)) {
- unsigned left_to_center = nr_left - target_left;
+ unsigned int left_to_center = nr_left - target_left;
+
copy_entries(center, 0, left, target_left, left_to_center);
copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
shift_down(right, nr_right - target_right);
} else {
- unsigned right_short = target_right - nr_right;
+ unsigned int right_short = target_right - nr_right;
+
shift_up(right, right_short);
copy_entries(right, 0, left, nr_left - right_short, right_short);
copy_entries(center, 0, left, target_left, nr_left - target_left);
@@ -642,7 +651,7 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
*
* Where A* is a shadow of A.
*/
-static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
+static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
struct dm_btree_value_type *vt, uint64_t key)
{
int r;
@@ -696,7 +705,7 @@ static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
* to the new shadow.
*/
static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
- struct btree_node *parent, unsigned index,
+ struct btree_node *parent, unsigned int index,
struct dm_block **result)
{
int r, inc;
@@ -725,11 +734,11 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
* Splits two nodes into three. This is more work, but results in fuller
* nodes, so saves metadata space.
*/
-static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
- struct dm_btree_value_type *vt, uint64_t key)
+static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+ struct dm_btree_value_type *vt, uint64_t key)
{
int r;
- unsigned middle_index;
+ unsigned int middle_index;
struct dm_block *left, *middle, *right, *parent;
struct btree_node *ln, *rn, *mn, *pn;
__le64 location;
@@ -781,7 +790,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
if (shadow_current(s) != right)
unlock_block(s->info, right);
- return r;
+ return r;
}
@@ -830,7 +839,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
int r;
size_t size;
- unsigned nr_left, nr_right;
+ unsigned int nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
struct btree_node *pn, *ln, *rn;
__le64 val;
@@ -904,7 +913,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
* Redistributes a node's entries with its left sibling.
*/
static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -933,7 +942,7 @@ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt
 * Redistributes a node's entries with its right sibling.
*/
static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -961,10 +970,10 @@ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *v
/*
* Returns the number of spare entries in a node.
*/
-static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
+static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
{
int r;
- unsigned nr_entries;
+ unsigned int nr_entries;
struct dm_block *block;
struct btree_node *node;
@@ -990,17 +999,18 @@ static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigne
*/
#define SPACE_THRESHOLD 8
static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct btree_node *parent = dm_block_data(shadow_parent(s));
- unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
- unsigned free_space;
+ unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
+ unsigned int free_space;
int left_shared = 0, right_shared = 0;
/* Should we move entries to the left sibling? */
if (parent_index > 0) {
dm_block_t left_b = value64(parent, parent_index - 1);
+
r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
if (r)
return r;
@@ -1018,6 +1028,7 @@ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type
/* Should we move entries to the right sibling? */
if (parent_index < (nr_parent - 1)) {
dm_block_t right_b = value64(parent, parent_index + 1);
+
r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
if (r)
return r;
@@ -1080,7 +1091,7 @@ static bool has_space_for_insert(struct btree_node *node, uint64_t key)
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
struct dm_btree_value_type *vt,
- uint64_t key, unsigned *index)
+ uint64_t key, unsigned int *index)
{
int r, i = *index, top = 1;
struct btree_node *node;
@@ -1214,9 +1225,9 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
}
static bool need_insert(struct btree_node *node, uint64_t *keys,
- unsigned level, unsigned index)
+ unsigned int level, unsigned int index)
{
- return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ return ((index >= le32_to_cpu(node->header.nr_entries)) ||
(le64_to_cpu(node->keys[index]) != keys[level]));
}
@@ -1226,7 +1237,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
__dm_written_to_disk(value)
{
int r;
- unsigned level, index = -1, last_level = info->levels - 1;
+ unsigned int level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
struct btree_node *n;
@@ -1309,7 +1320,7 @@ bad_unblessed:
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, NULL);
}
@@ -1318,7 +1329,7 @@ EXPORT_SYMBOL_GPL(dm_btree_insert);
int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root,
int *inserted)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, inserted);
}
@@ -1341,8 +1352,8 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
i = le32_to_cpu(ro_node(s)->header.nr_entries);
if (!i)
return -ENODATA;
- else
- i--;
+
+ i--;
if (find_highest)
*result_key = le64_to_cpu(ro_node(s)->keys[i]);
@@ -1412,7 +1423,7 @@ static int walk_node(struct dm_btree_info *info, dm_block_t block,
void *context)
{
int r;
- unsigned i, nr;
+ unsigned int i, nr;
struct dm_block *node;
struct btree_node *n;
uint64_t keys;
@@ -1455,7 +1466,7 @@ EXPORT_SYMBOL_GPL(dm_btree_walk);
static void prefetch_values(struct dm_btree_cursor *c)
{
- unsigned i, nr;
+ unsigned int i, nr;
__le64 value_le;
struct cursor_node *n = c->nodes + c->depth - 1;
struct btree_node *bn = dm_block_data(n->b);
@@ -1585,6 +1596,7 @@ EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
int r = inc_or_backtrack(c);
+
if (!r) {
r = find_leaf(c);
if (r)
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index d2ae5aa4d00b..1b92acd7823d 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -58,14 +59,14 @@ struct dm_btree_value_type {
* somewhere.) This method is _not_ called for insertion of a new
* value: It is assumed the ref count is already 1.
*/
- void (*inc)(void *context, const void *value, unsigned count);
+ void (*inc)(void *context, const void *value, unsigned int count);
/*
* These values are being deleted. The btree takes care of freeing
* the memory pointed to by @value. Often the del function just
 * needs to decrement a reference count somewhere.
*/
- void (*dec)(void *context, const void *value, unsigned count);
+ void (*dec)(void *context, const void *value, unsigned int count);
/*
* A test for equality between two values. When a value is
@@ -84,7 +85,7 @@ struct dm_btree_info {
/*
* Number of nested btrees. (Not the depth of a single tree.)
*/
- unsigned levels;
+ unsigned int levels;
struct dm_btree_value_type value_type;
};
@@ -121,7 +122,7 @@ int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
*/
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root)
- __dm_written_to_disk(value);
+ __dm_written_to_disk(value);
/*
* A variant of insert that indicates whether it actually inserted or just
@@ -149,7 +150,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
*/
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed);
+ dm_block_t *new_root, unsigned int *nr_removed);
/*
* Returns < 0 on failure. Otherwise the number of key entries that have
@@ -188,7 +189,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
struct cursor_node {
struct dm_block *b;
- unsigned index;
+ unsigned int index;
};
struct dm_btree_cursor {
@@ -196,7 +197,7 @@ struct dm_btree_cursor {
dm_block_t root;
bool prefetch_leaves;
- unsigned depth;
+ unsigned int depth;
struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
};
diff --git a/drivers/md/persistent-data/dm-persistent-data-internal.h b/drivers/md/persistent-data/dm-persistent-data-internal.h
index c49e26fff36c..c482434e566b 100644
--- a/drivers/md/persistent-data/dm-persistent-data-internal.h
+++ b/drivers/md/persistent-data/dm-persistent-data-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -9,11 +10,11 @@
#include "dm-block-manager.h"
-static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
+static inline unsigned int dm_hash_block(dm_block_t b, unsigned int hash_mask)
{
- const unsigned BIG_PRIME = 4294967291UL;
+ const unsigned int BIG_PRIME = 4294967291UL;
- return (((unsigned) b) * BIG_PRIME) & hash_mask;
+ return (((unsigned int) b) * BIG_PRIME) & hash_mask;
}
#endif /* _PERSISTENT_DATA_INTERNAL_H */
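A user-space sketch of the multiplicative hash above (bucket count is illustrative): the multiply by 2^32 - 5 scrambles the block number, and hash_mask, a power of two minus one, selects the bucket.

#include <stdint.h>
#include <stdio.h>

static unsigned int dm_hash_block(uint64_t b, unsigned int hash_mask)
{
	const unsigned int BIG_PRIME = 4294967291U; /* 2^32 - 5 */

	/* Unsigned multiply wraps mod 2^32, then mask to the table size. */
	return ((unsigned int)b * BIG_PRIME) & hash_mask;
}

int main(void)
{
	/* e.g. a 256-bucket table uses hash_mask 0xff */
	printf("%u\n", dm_hash_block(12345, 0xff));
	return 0;
}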
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index bfbfa750e016..591d1a43d035 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -41,7 +42,7 @@ static int index_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
- DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
le64_to_cpu(mi_le->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -50,7 +51,7 @@ static int index_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
INDEX_CSUM_XOR));
if (csum_disk != mi_le->csum) {
- DMERR_LIMIT("index_check failed: csum %u != wanted %u",
+ DMERR_LIMIT("i%s failed: csum %u != wanted %u", __func__,
le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
return -EILSEQ;
}
@@ -126,7 +127,7 @@ static void *dm_bitmap_data(struct dm_block *b)
#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
-static unsigned dm_bitmap_word_used(void *addr, unsigned b)
+static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -137,11 +138,11 @@ static unsigned dm_bitmap_word_used(void *addr, unsigned b)
return !(~bits & mask);
}
-static unsigned sm_lookup_bitmap(void *addr, unsigned b)
+static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
- unsigned hi, lo;
+ unsigned int hi, lo;
b = (b & (ENTRIES_PER_WORD - 1)) << 1;
hi = !!test_bit_le(b, (void *) w_le);
@@ -149,7 +150,7 @@ static unsigned sm_lookup_bitmap(void *addr, unsigned b)
return (hi << 1) | lo;
}
-static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -167,8 +168,8 @@ static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
__clear_bit_le(b + 1, (void *) w_le);
}
-static int sm_find_free(void *addr, unsigned begin, unsigned end,
- unsigned *result)
+static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
+ unsigned int *result)
{
while (begin < end) {
if (!(begin & (ENTRIES_PER_WORD - 1)) &&
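The bitmaps these helpers operate on pack a 2-bit reference count per block (counts of 3 or more spill to an overflow btree), so a 64-bit word holds 32 entries. A simplified sketch of the packing, ignoring the kernel's little-endian split-bit layout:

#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_WORD 32 /* 64 bits / 2 bits per entry */

static unsigned int lookup2(uint64_t w, unsigned int b)
{
	return (w >> (2 * (b & (ENTRIES_PER_WORD - 1)))) & 3;
}

static uint64_t set2(uint64_t w, unsigned int b, unsigned int val)
{
	unsigned int shift = 2 * (b & (ENTRIES_PER_WORD - 1));

	w &= ~(UINT64_C(3) << shift);
	return w | ((uint64_t)(val & 3) << shift);
}

int main(void)
{
	uint64_t w = 0;

	w = set2(w, 5, 2);
	printf("%u\n", lookup2(w, 5)); /* 2 */
	return 0;
}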
@@ -237,7 +238,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
int r;
dm_block_t i, nr_blocks, nr_indexes;
- unsigned old_blocks, blocks;
+ unsigned int old_blocks, blocks;
nr_blocks = ll->nr_blocks + extra_blocks;
old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
@@ -351,7 +352,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
- unsigned position;
+ unsigned int position;
uint32_t bit_end;
r = ll->load_ie(ll, i, &ie_disk);
@@ -369,7 +370,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
bit_end = (i == index_end - 1) ? end : ll->entries_per_block;
r = sm_find_free(dm_bitmap_data(blk),
- max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
+ max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
bit_end, &position);
if (r == -ENOSPC) {
/*
@@ -390,7 +391,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
}
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
- dm_block_t begin, dm_block_t end, dm_block_t *b)
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
{
int r;
uint32_t count;
@@ -608,6 +609,7 @@ static int sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_conte
static inline int shadow_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
int r, inc;
+
r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ic->ie_disk.blocknr),
&dm_sm_bitmap_validator, &ic->bitmap_block, &inc);
if (r < 0) {
@@ -747,6 +749,7 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);
+
if (r)
return r;
}
@@ -790,13 +793,12 @@ static int __sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
rc = le32_to_cpu(*v_ptr);
*old_rc = rc;
- if (rc == 3) {
+ if (rc == 3)
return __sm_ll_del_overflow(ll, b, ic);
- } else {
- rc--;
- *v_ptr = cpu_to_le32(rc);
- return 0;
- }
+
+ rc--;
+ *v_ptr = cpu_to_le32(rc);
+ return 0;
}
static int sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
@@ -929,6 +931,7 @@ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);
+
if (r)
return r;
}
@@ -1097,7 +1100,7 @@ static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
&iec->index, &iec->ie, &ll->bitmap_root);
}
-static inline unsigned hash_index(dm_block_t index)
+static inline unsigned int hash_index(dm_block_t index)
{
return dm_hash_block(index, IE_CACHE_MASK);
}
@@ -1106,7 +1109,7 @@ static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
int r;
- unsigned h = hash_index(index);
+ unsigned int h = hash_index(index);
struct ie_cache *iec = ll->ie_cache + h;
if (iec->valid) {
@@ -1137,7 +1140,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
int r;
- unsigned h = hash_index(index);
+ unsigned int h = hash_index(index);
struct ie_cache *iec = ll->ie_cache + h;
ll->bitmap_index_changed = true;
@@ -1164,9 +1167,11 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
static int disk_ll_init_index(struct ll_disk *ll)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
+
iec->valid = false;
iec->dirty = false;
}
@@ -1186,10 +1191,11 @@ static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
static int disk_ll_commit(struct ll_disk *ll)
{
int r = 0;
- unsigned i;
+ unsigned int i;
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
+
if (iec->valid && iec->dirty)
r = ie_cache_writeback(ll, iec);
}
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 706ceb85d680..e83d1f225078 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -33,7 +34,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
#define MAX_METADATA_BITMAPS 255
@@ -43,7 +44,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
struct ll_disk;
@@ -102,7 +103,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
#define ENTRIES_PER_BYTE 4
@@ -110,7 +111,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
/*----------------------------------------------------------------*/
@@ -120,7 +121,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result);
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
- dm_block_t begin, dm_block_t end, dm_block_t *result);
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
/*
* The next three functions return (via nr_allocations) the net number of
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index d0a8d5e73c28..f4241f54e20e 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -48,6 +49,7 @@ static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
*count = smd->old_ll.nr_blocks;
return 0;
@@ -56,6 +58,7 @@ static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
*count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
return 0;
@@ -65,6 +68,7 @@ static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
return sm_ll_lookup(&smd->ll, b, result);
}
@@ -91,9 +95,8 @@ static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_insert(&smd->ll, b, count, &nr_allocations);
- if (!r) {
+ if (!r)
smd->nr_allocated_this_transaction += nr_allocations;
- }
return r;
}
@@ -134,22 +137,20 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
- if (r == -ENOSPC) {
+ if (r == -ENOSPC)
/*
* There's no free block between smd->begin and the end of the metadata device.
* We search before smd->begin in case something has been freed.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
- }
if (r)
return r;
smd->begin = *b + 1;
r = sm_ll_inc(&smd->ll, *b, *b + 1, &nr_allocations);
- if (!r) {
+ if (!r)
smd->nr_allocated_this_transaction += nr_allocations;
- }
return r;
}
diff --git a/drivers/md/persistent-data/dm-space-map-disk.h b/drivers/md/persistent-data/dm-space-map-disk.h
index 447a0a9a2d9f..6f8665db94db 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.h
+++ b/drivers/md/persistent-data/dm-space-map-disk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 392ae26134a4..04698fd03e60 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -94,8 +95,8 @@ struct block_op {
};
struct bop_ring_buffer {
- unsigned begin;
- unsigned end;
+ unsigned int begin;
+ unsigned int end;
struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};
@@ -110,9 +111,10 @@ static bool brb_empty(struct bop_ring_buffer *brb)
return brb->begin == brb->end;
}
-static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+static unsigned int brb_next(struct bop_ring_buffer *brb, unsigned int old)
{
- unsigned r = old + 1;
+ unsigned int r = old + 1;
+
return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
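A sketch of the wrap-around indexing above (the buffer size here is illustrative): keeping one slot unused lets begin == end unambiguously mean "empty", so the buffer holds at most N - 1 queued block ops.

#include <stdio.h>

#define NR_BOPS 8 /* illustrative; the kernel sizes this as MAX_RECURSIVE_ALLOCATIONS + 1 */

static unsigned int brb_next(unsigned int old)
{
	unsigned int r = old + 1;

	return r >= NR_BOPS ? 0 : r;
}

int main(void)
{
	unsigned int begin = 0, end = 0, pushed = 0;

	/* Push until the next slot would collide with begin ("full"). */
	while (brb_next(end) != begin) {
		end = brb_next(end);
		pushed++;
	}
	printf("capacity %u of %u slots\n", pushed, NR_BOPS); /* 7 of 8 */
	return 0;
}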
@@ -120,7 +122,7 @@ static int brb_push(struct bop_ring_buffer *brb,
enum block_op_type type, dm_block_t b, dm_block_t e)
{
struct block_op *bop;
- unsigned next = brb_next(brb, brb->end);
+ unsigned int next = brb_next(brb, brb->end);
/*
 * We don't allow the last bop to be filled; this way we can
@@ -171,8 +173,8 @@ struct sm_metadata {
dm_block_t begin;
- unsigned recursion_count;
- unsigned allocated_this_transaction;
+ unsigned int recursion_count;
+ unsigned int allocated_this_transaction;
struct bop_ring_buffer uncommitted;
struct threshold threshold;
@@ -181,6 +183,7 @@ struct sm_metadata {
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b, dm_block_t e)
{
int r = brb_push(&smm->uncommitted, type, b, e);
+
if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
@@ -300,9 +303,9 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- unsigned adjustment = 0;
+ unsigned int adjustment = 0;
/*
* We may have some uncommitted adjustments to add. This list
@@ -340,7 +343,7 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
int r, adjustment = 0;
- unsigned i;
+ unsigned int i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -486,6 +489,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
+
if (r) {
DMERR_LIMIT("unable to allocate new metadata block");
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 64df923974d8..ca5c3e532627 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
index a015cd11f6e9..dab490353781 100644
--- a/drivers/md/persistent-data/dm-space-map.h
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 16643fc974e8..6dc016248baf 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -28,14 +29,15 @@ struct prefetch_set {
dm_block_t blocks[PREFETCH_SIZE];
};
-static unsigned prefetch_hash(dm_block_t b)
+static unsigned int prefetch_hash(dm_block_t b)
{
return hash_64(b, PREFETCH_BITS);
}
static void prefetch_wipe(struct prefetch_set *p)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < PREFETCH_SIZE; i++)
p->blocks[i] = PREFETCH_SENTINEL;
}
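The prefetch set above is a small direct-mapped table keyed by a hash of the block number; collisions simply overwrite or drop entries, which is harmless because prefetching is only a hint. A sketch under that reading (the stand-in hash below is illustrative, not the kernel's hash_64()):

#include <stdint.h>
#include <stdio.h>

#define PREFETCH_BITS 7
#define PREFETCH_SIZE (1u << PREFETCH_BITS)
#define PREFETCH_SENTINEL ((uint64_t)-1)

static uint64_t blocks[PREFETCH_SIZE];

static unsigned int prefetch_hash(uint64_t b)
{
	/* Stand-in for hash_64(): any mixing function works here. */
	return (unsigned int)((b * 0x9E3779B97F4A7C15ULL) >> (64 - PREFETCH_BITS));
}

static void prefetch_add(uint64_t b)
{
	unsigned int h = prefetch_hash(b);

	/* Only record b if the slot is free; a collision just means
	 * one of the two blocks misses its prefetch hint. */
	if (blocks[h] == PREFETCH_SENTINEL)
		blocks[h] = b;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < PREFETCH_SIZE; i++)
		blocks[i] = PREFETCH_SENTINEL;
	prefetch_add(42);
	printf("slot %u holds %llu\n", prefetch_hash(42),
	       (unsigned long long)blocks[prefetch_hash(42)]);
	return 0;
}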
@@ -48,7 +50,7 @@ static void prefetch_init(struct prefetch_set *p)
static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
- unsigned h = prefetch_hash(b);
+ unsigned int h = prefetch_hash(b);
mutex_lock(&p->lock);
if (p->blocks[h] == PREFETCH_SENTINEL)
@@ -59,7 +61,7 @@ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
{
- unsigned i;
+ unsigned int i;
mutex_lock(&p->lock);
@@ -103,7 +105,7 @@ struct dm_transaction_manager {
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
- unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
+ unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
spin_lock(&tm->lock);
@@ -123,7 +125,7 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
*/
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
- unsigned bucket;
+ unsigned int bucket;
struct shadow_info *si;
si = kmalloc(sizeof(*si), GFP_NOIO);
@@ -393,11 +395,11 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
EXPORT_SYMBOL_GPL(dm_tm_dec_range);
void dm_tm_with_runs(struct dm_transaction_manager *tm,
- const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
+ const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
{
uint64_t b, begin, end;
bool in_run = false;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, value_le++) {
b = le64_to_cpu(*value_le);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 906c02ed0365..01f7e650118d 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -111,7 +112,7 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
*/
typedef void (*dm_tm_run_fn)(struct dm_transaction_manager *, dm_block_t, dm_block_t);
void dm_tm_with_runs(struct dm_transaction_manager *tm,
- const __le64 *value_le, unsigned count, dm_tm_run_fn fn);
+ const __le64 *value_le, unsigned int count, dm_tm_run_fn fn);
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);