Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c        |  3 ++-
-rw-r--r--  drivers/md/dm-zoned-target.c | 15 +++++++--------
-rw-r--r--  drivers/md/raid5.c           |  7 +++++++
3 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f2662c21a6df..5315fd261c23 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
 	int err;
 	u8 *buf;
 
-	reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
+	reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
+	reqsize = ALIGN(reqsize, __alignof__(__le64));
 
 	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
 	if (!req)
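
The dm-crypt change fixes an undersized allocation: the old code reserved only the tfm's per-request context, omitting the struct skcipher_request header itself, so the IV scratch buffer placed right after the request overlapped it. A minimal sketch of the corrected allocation pattern, with tfm, cc and the -ENOMEM error path assumed from the surrounding function rather than shown in the hunk:

	struct skcipher_request *req;
	unsigned int reqsize;
	u8 *buf;

	/* Reserve the request header plus the transform's per-request
	 * context, then round up so the trailing IV buffer is aligned
	 * for __le64 stores. */
	reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
	reqsize = ALIGN(reqsize, __alignof__(__le64));

	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
	if (!req)
		return -ENOMEM;	/* assumed error path, not shown in the hunk */

	/* The IV buffer now starts past the whole request, not inside it. */
	buf = (u8 *)req + reqsize;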
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ad8e670a2f9b..b487f7acc860 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -748,17 +748,16 @@ err:
 /*
  * Cleanup zoned device information.
  */
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
 	int i;
 
-	for (i = 0; i < dmz->nr_ddevs; i++) {
-		if (dmz->ddev[i]) {
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		if (dmz->ddev[i])
 			dm_put_device(ti, dmz->ddev[i]);
-			dmz->ddev[i] = NULL;
-		}
-	}
+
+	kfree(dmz->ddev);
 }
 
 static int dmz_fixup_devices(struct dm_target *ti)
@@ -948,7 +947,7 @@ err_bio:
 err_meta:
 	dmz_dtr_metadata(dmz->metadata);
 err_dev:
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 err:
 	kfree(dmz->dev);
 	kfree(dmz);
@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
 
 	bioset_exit(&dmz->bio_set);
 
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 
 	mutex_destroy(&dmz->chunk_lock);
 
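
Taken together, the three dm-zoned hunks rename the helper to reflect that it drops every underlying device, and make it free the dmz->ddev array itself, which was previously leaked; clearing each slot to NULL becomes unnecessary once the array is freed. A sketch of the resulting helper, assembled from the hunks above:

	static void dmz_put_zoned_devices(struct dm_target *ti)
	{
		struct dmz_target *dmz = ti->private;
		int i;

		/* Drop the reference taken on each underlying device. */
		for (i = 0; i < dmz->nr_ddevs; i++)
			if (dmz->ddev[i])
				dm_put_device(ti, dmz->ddev[i]);

		/* Free the device array itself; this was the leak. */
		kfree(dmz->ddev);
	}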
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4cb9c608ee19..284cd71bcc68 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
 
 			set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
 			r5l_wake_reclaim(conf->log, 0);
+
+			/* release batch_last before wait to avoid risk of deadlock */
+			if (ctx && ctx->batch_last) {
+				raid5_release_stripe(ctx->batch_last);
+				ctx->batch_last = NULL;
+			}
+
 			wait_event_lock_irq(conf->wait_for_stripe,
 					    is_inactive_blocked(conf, hash),
 					    *(conf->hash_locks + hash));
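
The raid5 hunk breaks a potential deadlock: ctx->batch_last pins a reference on a stripe_head, and sleeping in wait_event_lock_irq() while holding it can keep that stripe off the inactive list, so is_inactive_blocked() may never become true for any waiter. A stripped-down sketch of the release-before-wait pattern, with ctx, conf and hash as in the function above:

	/* Drop any stripe reference still held before blocking: a pinned
	 * stripe cannot go inactive, and it may be exactly the one the
	 * wait condition needs returned to the inactive list. */
	if (ctx && ctx->batch_last) {
		raid5_release_stripe(ctx->batch_last);
		ctx->batch_last = NULL;
	}

	wait_event_lock_irq(conf->wait_for_stripe,
			    is_inactive_blocked(conf, hash),
			    *(conf->hash_locks + hash));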