Diffstat (limited to 'drivers/md')
 drivers/md/bcache/super.c |  22
 drivers/md/dm-crypt.c     |  53
 drivers/md/dm-integrity.c |  12
 drivers/md/dm-stripe.c    |   1
 drivers/md/dm-table.c     |   4
 drivers/md/dm.c           |  54
 drivers/md/md.c           |  73
 drivers/md/md.h           |  26
 drivers/md/raid0.c        |   1
 drivers/md/raid10.c       |   4
 drivers/md/raid5.c        |   2
 11 files changed, 170 insertions(+), 82 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2ea490b9d370..1492c8552255 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -168,14 +168,14 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
{
const char *err;
struct cache_sb_disk *s;
- struct page *page;
+ struct folio *folio;
unsigned int i;
- page = read_cache_page_gfp(bdev->bd_mapping,
- SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
- if (IS_ERR(page))
+ folio = mapping_read_folio_gfp(bdev->bd_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(folio))
return "IO error";
- s = page_address(page) + offset_in_page(SB_OFFSET);
+ s = folio_address(folio) + offset_in_folio(folio, SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
@@ -272,7 +272,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
*res = s;
return NULL;
err:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -1366,7 +1366,7 @@ static CLOSURE_CALLBACK(cached_dev_free)
mutex_unlock(&bch_register_lock);
if (dc->sb_disk)
- put_page(virt_to_page(dc->sb_disk));
+ folio_put(virt_to_folio(dc->sb_disk));
if (dc->bdev_file)
fput(dc->bdev_file);
@@ -2216,7 +2216,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_disk)
- put_page(virt_to_page(ca->sb_disk));
+ folio_put(virt_to_folio(ca->sb_disk));
if (ca->bdev_file)
fput(ca->bdev_file);
@@ -2593,7 +2593,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!holder) {
ret = -ENOMEM;
err = "cannot allocate memory";
- goto out_put_sb_page;
+ goto out_put_sb_folio;
}
/* Now reopen in exclusive mode with proper holder */
@@ -2667,8 +2667,8 @@ async_done:
out_free_holder:
kfree(holder);
-out_put_sb_page:
- put_page(virt_to_page(sb_disk));
+out_put_sb_folio:
+ folio_put(virt_to_folio(sb_disk));
out_blkdev_put:
if (bdev_file)
fput(bdev_file);
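
For reference, the converted read_super() follows the usual page-cache folio idiom. A minimal sketch of the resulting flow, using only identifiers that appear in the hunks above (error handling condensed):

    struct folio *folio;
    struct cache_sb_disk *s;

    /* Read (or find) the page-cache folio holding the on-disk superblock. */
    folio = mapping_read_folio_gfp(bdev->bd_mapping,
                                   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
    if (IS_ERR(folio))
        return "IO error";

    /* Locate the superblock within the (possibly multi-page) folio. */
    s = folio_address(folio) + offset_in_folio(folio, SB_OFFSET);

    /* On the error path, drop the reference taken by mapping_read_folio_gfp(). */
    folio_put(folio);

The later put_page(virt_to_page(...)) call sites become folio_put(virt_to_folio(...)), so the superblock buffer is consistently released through the folio API.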
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 17157c4216a5..5ef43231fe77 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -253,17 +253,35 @@ MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
-static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
+
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
{
+ struct crypt_config *cc = ti->private;
unsigned val, sector_align;
- val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
- if (likely(!val))
- val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
- if (wrt || cc->used_tag_size) {
- if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
- val = BIO_MAX_VECS << PAGE_SHIFT;
- }
- sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
+ bool wrt = op_is_write(bio_op(bio));
+
+ if (wrt) {
+ /*
+ * For zoned devices, splitting write operations creates the
+ * risk of deadlocking queue freeze operations with zone write
+ * plugging BIO work when the remainder of a split BIO is
+ * issued. So always allow the entire BIO to proceed.
+ */
+ if (ti->emulate_zone_append)
+ return bio_sectors(bio);
+
+ val = min_not_zero(READ_ONCE(max_write_size),
+ DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
+ } else {
+ val = min_not_zero(READ_ONCE(max_read_size),
+ DM_CRYPT_DEFAULT_MAX_READ_SIZE);
+ }
+
+ if (wrt || cc->used_tag_size)
+ val = min(val, BIO_MAX_VECS << PAGE_SHIFT);
+
+ sector_align = max(bdev_logical_block_size(cc->dev->bdev),
+ (unsigned)cc->sector_size);
val = round_down(val, sector_align);
if (unlikely(!val))
val = sector_align;
@@ -1192,11 +1210,11 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
- if (bi->tuple_size < cc->used_tag_size) {
+ if (bi->metadata_size < cc->used_tag_size) {
ti->error = "Integrity profile tag size mismatch.";
return -EINVAL;
}
- cc->tuple_size = bi->tuple_size;
+ cc->tuple_size = bi->metadata_size;
if (1 << bi->interval_exp != cc->sector_size) {
ti->error = "Integrity profile sector size mismatch.";
return -EINVAL;
@@ -3496,7 +3514,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
/*
* Check if bio is too large, split as needed.
*/
- max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
+ max_sectors = get_max_request_sectors(ti, bio);
if (unlikely(bio_sectors(bio) > max_sectors))
dm_accept_partial_bio(bio, max_sectors);
@@ -3733,6 +3751,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
max_t(unsigned int, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
+
+ /*
+ * For zoned dm-crypt targets, there will be no internal splitting of
+ * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But
+ * without respecting this limit, crypt_alloc_buffer() will trigger a
+ * BUG(). Avoid this by forcing DM core to split write BIOs to this
+ * limit.
+ */
+ if (ti->emulate_zone_append)
+ limits->max_hw_sectors = min(limits->max_hw_sectors,
+ BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
}
static struct target_type crypt_target = {
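
A condensed view of the write-sizing policy introduced above (identifiers are taken from the patch; this is a sketch, not the complete helper, and the final byte-to-sector conversion is outside the quoted hunk):

    /* Zoned targets: never split writes here; DM core enforces the limit. */
    if (ti->emulate_zone_append && op_is_write(bio_op(bio)))
        return bio_sectors(bio);

    /* Module parameter if set, otherwise the compile-time default ... */
    val = min_not_zero(READ_ONCE(max_write_size),
                       DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
    /* ... capped at what a single BIO can carry ... */
    val = min(val, BIO_MAX_VECS << PAGE_SHIFT);
    /* ... and rounded down (in bytes) to the sector alignment. */
    val = round_down(val, max(bdev_logical_block_size(cc->dev->bdev),
                              (unsigned)cc->sector_size));

For zoned targets the same BIO_MAX_VECS cap is instead applied as a max_hw_sectors limit in crypt_io_hints(), so DM core splits write BIOs before crypt_map() sees them.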
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4395657fa583..efeee0a873c0 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3906,8 +3906,8 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
struct blk_integrity *bi = &limits->integrity;
memset(bi, 0, sizeof(*bi));
- bi->tuple_size = ic->tag_size;
- bi->tag_size = bi->tuple_size;
+ bi->metadata_size = ic->tag_size;
+ bi->tag_size = bi->metadata_size;
bi->interval_exp =
ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
}
@@ -4746,18 +4746,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Integrity profile not supported";
goto bad;
}
- /*printk("tag_size: %u, tuple_size: %u\n", bi->tag_size, bi->tuple_size);*/
- if (bi->tuple_size < ic->tag_size) {
+ /*printk("tag_size: %u, metadata_size: %u\n", bi->tag_size, bi->metadata_size);*/
+ if (bi->metadata_size < ic->tag_size) {
r = -EINVAL;
ti->error = "The integrity profile is smaller than tag size";
goto bad;
}
- if ((unsigned long)bi->tuple_size > PAGE_SIZE / 2) {
+ if ((unsigned long)bi->metadata_size > PAGE_SIZE / 2) {
r = -EINVAL;
ti->error = "Too big tuple size";
goto bad;
}
- ic->tuple_size = bi->tuple_size;
+ ic->tuple_size = bi->metadata_size;
if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) {
r = -EINVAL;
ti->error = "Integrity profile sector size mismatch";
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a7dc04bd55e5..5bbbdf8fc1bd 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -458,6 +458,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct stripe_c *sc = ti->private;
unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ limits->chunk_sectors = sc->chunk_size;
limits->io_min = chunk_size;
limits->io_opt = chunk_size * sc->stripes;
}
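
Setting limits->chunk_sectors advertises the chunk boundary to the block layer so that BIOs can be split there; the raid0 and raid10 hunks below add the same field to their queue limits. With this change, stripe_io_hints() reads roughly as:

    static void stripe_io_hints(struct dm_target *ti, struct queue_limits *limits)
    {
        struct stripe_c *sc = ti->private;
        unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;

        limits->chunk_sectors = sc->chunk_size;    /* chunk boundary, in sectors */
        limits->io_min = chunk_size;               /* preferred minimum I/O, in bytes */
        limits->io_opt = chunk_size * sc->stripes; /* optimal I/O: one full stripe */
    }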
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 24a857ff6d0b..d9d5e6aa5707 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -2065,8 +2065,10 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
limits->discard_alignment = 0;
}
- if (!dm_table_supports_write_zeroes(t))
+ if (!dm_table_supports_write_zeroes(t)) {
limits->max_write_zeroes_sectors = 0;
+ limits->max_hw_wzeroes_unmap_sectors = 0;
+ }
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1726f0f828cc..abfe0392b5a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1293,8 +1293,9 @@ out:
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
- * __send_duplicate_bios().
+ * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
+ * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
+ * by __send_duplicate_bios().
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1327,11 +1328,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
- BUG_ON(op_is_zone_mgmt(bio_op(bio)));
- BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bio_sectors > *tio->len_ptr);
BUG_ON(n_sectors > bio_sectors);
+ if (static_branch_unlikely(&zoned_enabled) &&
+ unlikely(bdev_is_zoned(bio->bi_bdev))) {
+ enum req_op op = bio_op(bio);
+
+ BUG_ON(op_is_zone_mgmt(op));
+ BUG_ON(op == REQ_OP_WRITE);
+ BUG_ON(op == REQ_OP_WRITE_ZEROES);
+ BUG_ON(op == REQ_OP_ZONE_APPEND);
+ }
+
*tio->len_ptr -= bio_sectors - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
@@ -1776,19 +1785,35 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
}
#ifdef CONFIG_BLK_DEV_ZONED
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
- struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
{
/*
- * For mapped device that need zone append emulation, we must
- * split any large BIO that straddles zone boundaries.
+ * Special case the zone operations that cannot or should not be split.
*/
- return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
- !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+ switch (bio_op(bio)) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return false;
+ default:
+ break;
+ }
+
+ /*
+ * When mapped devices use the block layer zone write plugging, we must
+ * split any large BIO to the mapped device limits to not submit BIOs
+ * that span zone boundaries and to avoid potential deadlocks with
+ * queue freeze operations.
+ */
+ return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
}
+
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
- return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
+ if (!bio_needs_zone_write_plugging(bio))
+ return false;
+ return blk_zone_plug_bio(bio, 0);
}
static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
@@ -1904,8 +1929,7 @@ static blk_status_t __send_zone_reset_all(struct clone_info *ci)
}
#else
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
- struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
{
return false;
}
@@ -1932,9 +1956,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
is_abnormal = is_abnormal_io(bio);
if (static_branch_unlikely(&zoned_enabled)) {
- /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
- need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
- (is_abnormal || dm_zone_bio_needs_split(md, bio));
+ need_split = is_abnormal || dm_zone_bio_needs_split(bio);
} else {
need_split = is_abnormal;
}
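
For targets that are still allowed to split, the map-side use of dm_accept_partial_bio() is unchanged. A minimal, hypothetical target sketch (the example_* names and the per-target limit are illustrative; only the DM/block helpers are real):

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
        struct example_ctx *ctx = ti->private;          /* hypothetical state */
        unsigned int max_sectors = ctx->max_io_sectors; /* hypothetical limit */

        /*
         * Tell DM core that this target only handles the first max_sectors
         * of the BIO; core resubmits the remainder. Per the rules above,
         * this is not allowed for flushes, zone management operations, or
         * writes to zoned devices (native or emulated zone append).
         */
        if (bio_sectors(bio) > max_sectors)
            dm_accept_partial_bio(bio, max_sectors);

        bio_set_dev(bio, ctx->dev->bdev);               /* ctx->dev: struct dm_dev */
        return DM_MAPIO_REMAPPED;
    }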
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0f03b21e66e4..046fe85c76fe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -636,9 +636,6 @@ static void __mddev_put(struct mddev *mddev)
mddev->ctime || mddev->hold_active)
return;
- /* Array is not configured at all, and not held active, so destroy it */
- set_bit(MD_DELETED, &mddev->flags);
-
/*
* Call queue_work inside the spinlock so that flush_workqueue() after
* mddev_find will succeed in waiting for the work to be done.
@@ -873,6 +870,16 @@ void mddev_unlock(struct mddev *mddev)
kobject_del(&rdev->kobj);
export_rdev(rdev, mddev);
}
+
+ /* Call del_gendisk() after releasing reconfig_mutex to avoid a
+ * deadlock (e.g. calling del_gendisk() under the lock while a
+ * sysfs file access waits for the lock).
+ * MD_DELETED is only used for md raid and is set in do_md_stop();
+ * dm raid only uses md_stop() to stop, so dm raid does not need
+ * to check MD_DELETED when taking the reconfig lock.
+ */
+ if (test_bit(MD_DELETED, &mddev->flags))
+ del_gendisk(mddev->gendisk);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -5774,19 +5781,30 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
+ struct kernfs_node *kn = NULL;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+
+ if (entry->store == array_state_store && cmd_match(page, "clear"))
+ kn = sysfs_break_active_protection(kobj, attr);
+
spin_lock(&all_mddevs_lock);
if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return -EBUSY;
}
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
+
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
+
return rv;
}
@@ -5794,12 +5812,6 @@ static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
- if (mddev->sysfs_state)
- sysfs_put(mddev->sysfs_state);
- if (mddev->sysfs_level)
- sysfs_put(mddev->sysfs_level);
-
- del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
@@ -6413,15 +6425,10 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- /*
- * Don't clear MD_CLOSING, or mddev can be opened again.
- * 'hold_active != 0' means mddev is still in the creation
- * process and will be used later.
- */
- if (mddev->hold_active)
- mddev->flags = 0;
- else
- mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ /* if UNTIL_STOP is set, it's cleared here */
+ mddev->hold_active = 0;
+ /* Don't clear MD_CLOSING, or mddev can be opened again. */
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6516,8 +6523,6 @@ static void __md_stop(struct mddev *mddev)
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
- if (pers->sync_request && mddev->to_remove == NULL)
- mddev->to_remove = &md_redundancy_group;
put_pers(pers);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -6646,10 +6651,8 @@ static int do_md_stop(struct mddev *mddev, int mode)
mddev->bitmap_info.offset = 0;
export_array(mddev);
-
md_clean(mddev);
- if (mddev->hold_active == UNTIL_STOP)
- mddev->hold_active = 0;
+ set_bit(MD_DELETED, &mddev->flags);
}
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -9456,17 +9459,11 @@ static bool md_spares_need_change(struct mddev *mddev)
return false;
}
-static int remove_and_add_spares(struct mddev *mddev,
- struct md_rdev *this)
+static int remove_spares(struct mddev *mddev, struct md_rdev *this)
{
struct md_rdev *rdev;
- int spares = 0;
int removed = 0;
- if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- /* Mustn't remove devices when resync thread is running */
- return 0;
-
rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) && rdev_removeable(rdev) &&
!mddev->pers->hot_remove_disk(mddev, rdev)) {
@@ -9480,6 +9477,21 @@ static int remove_and_add_spares(struct mddev *mddev,
if (removed && mddev->kobj.sd)
sysfs_notify_dirent_safe(mddev->sysfs_degraded);
+ return removed;
+}
+
+static int remove_and_add_spares(struct mddev *mddev,
+ struct md_rdev *this)
+{
+ struct md_rdev *rdev;
+ int spares = 0;
+ int removed = 0;
+
+ if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ /* Mustn't remove devices when resync thread is running */
+ return 0;
+
+ removed = remove_spares(mddev, this);
if (this && removed)
goto no_add;
@@ -9522,6 +9534,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
/* Check if resync is in progress. */
if (mddev->recovery_cp < MaxSector) {
+ remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
return true;
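
The md_attr_store() change uses the standard kernfs pattern for a sysfs write that may remove its own kobject: break the attribute's active protection around the store so that kobject removal does not deadlock waiting for the write to finish. A minimal sketch of the pattern (store_that_may_remove_self() and the condition stand in for array_state_store() handling "clear"):

    struct kernfs_node *kn = NULL;
    ssize_t rv;

    /* This particular write may end up tearing the kobject down. */
    if (write_may_remove_kobject)                       /* illustrative condition */
        kn = sysfs_break_active_protection(kobj, attr);

    rv = store_that_may_remove_self(mddev, page, length);

    if (kn)
        sysfs_unbreak_active_protection(kn);
    return rv;

In md's case this only applies to the array_state attribute when the command is "clear", which reaches do_md_stop(), sets MD_DELETED, and lets mddev_unlock() call del_gendisk() outside reconfig_mutex.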
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d45a9e6ead80..67b365621507 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -700,11 +700,26 @@ static inline bool reshape_interrupted(struct mddev *mddev)
static inline int __must_check mddev_lock(struct mddev *mddev)
{
- return mutex_lock_interruptible(&mddev->reconfig_mutex);
+ int ret;
+
+ ret = mutex_lock_interruptible(&mddev->reconfig_mutex);
+
+ /* MD_DELETED is set in do_md_stop() with reconfig_mutex held,
+ * so check it here.
+ */
+ if (!ret && test_bit(MD_DELETED, &mddev->flags)) {
+ ret = -ENODEV;
+ mutex_unlock(&mddev->reconfig_mutex);
+ }
+
+ return ret;
}
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
+ * There is no need to check MD_DELETED here: the owner holding
+ * the lock can't be stopped, and no path calls this function
+ * after do_md_stop().
*/
static inline void mddev_lock_nointr(struct mddev *mddev)
{
@@ -713,7 +728,14 @@ static inline void mddev_lock_nointr(struct mddev *mddev)
static inline int mddev_trylock(struct mddev *mddev)
{
- return mutex_trylock(&mddev->reconfig_mutex);
+ int ret;
+
+ ret = mutex_trylock(&mddev->reconfig_mutex);
+ if (ret && test_bit(MD_DELETED, &mddev->flags)) {
+ ret = 0;
+ mutex_unlock(&mddev->reconfig_mutex);
+ }
+ return ret;
}
extern void mddev_unlock(struct mddev *mddev);
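
With these changes a caller sees a torn-down array as an error from mddev_lock() (and as a plain failure from mddev_trylock()) instead of operating on a half-destroyed mddev. A typical locked section now looks like (sketch):

    int err = mddev_lock(mddev);
    if (err)
        return err;     /* -EINTR, or -ENODEV once MD_DELETED is set */

    /* ... reconfiguration under reconfig_mutex ... */

    mddev_unlock(mddev);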
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d8f639f4ae12..cbe2a9054cb9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,6 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
+ lim.chunk_sectors = mddev->chunk_sectors;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9bd2005bfd0..95dc354a86a0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2446,15 +2446,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* that are active
*/
for (i = 0; i < conf->copies; i++) {
- int d;
-
tbio = r10_bio->devs[i].repl_bio;
if (!tbio || !tbio->bi_end_io)
continue;
if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
&& r10_bio->devs[i].bio != fbio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
submit_bio_noacct(tbio);
}
@@ -4012,6 +4009,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
+ lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ca5b0e8ba707..7ec61ee7b218 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -9040,7 +9040,7 @@ static int __init raid5_init(void)
int ret;
raid5_wq = alloc_workqueue("raid5wq",
- WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_SYSFS, 0);
if (!raid5_wq)
return -ENOMEM;
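
WQ_CPU_INTENSIVE only affects per-CPU (bound) workqueues, so it was a no-op in combination with WQ_UNBOUND; dropping it does not change behaviour. The resulting allocation, as in the hunk above:

    raid5_wq = alloc_workqueue("raid5wq",
                               WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
    if (!raid5_wq)
        return -ENOMEM;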