Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  3197
1 files changed, 1814 insertions, 1383 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ec734588a1c6..57d50465eed1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* raid1.c : Multiple Devices driver for Linux
*
@@ -20,15 +21,6 @@
*
* Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
* - persistent bitmap code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
@@ -37,76 +29,145 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
+
+#include <trace/events/block.h>
+
#include "md.h"
#include "raid1.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
+#include "md-cluster.h"
-/*
- * Number of guaranteed r1bios in case of extreme VM load:
- */
-#define NR_RAID1_BIOS 256
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+ (1L << MD_HAS_PPL) | \
+ (1L << MD_HAS_MULTIPLE_PPLS))
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context. So we record
- * the success by setting devs[n].bio to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
+static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
+static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
+static void raid1_free(struct mddev *mddev, void *priv);
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+#define RAID_1_10_NAME "raid1"
+#include "raid1-10.c"
-/* When there are this many requests queue to be written by
- * the raid1 thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+ START, LAST, static inline, raid1_rb);
-static void allow_barrier(struct r1conf *conf);
-static void lower_barrier(struct r1conf *conf);
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+ struct serial_info *si, int idx)
+{
+ unsigned long flags;
+ int ret = 0;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = lo + r1_bio->sectors;
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ /* collision happened */
+ if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+ ret = -EBUSY;
+ else {
+ si->start = lo;
+ si->last = hi;
+ raid1_rb_insert(si, &serial->serial_rb);
+ }
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
-static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
+ return ret;
+}
+
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
- struct pool_info *pi = data;
- int size = offsetof(struct r1bio, bios[pi->raid_disks]);
+ struct mddev *mddev = rdev->mddev;
+ struct serial_info *si;
+ int idx = sector_to_idx(r1_bio->sector);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
- /* allocate a r1bio with room for raid_disks entries in the bios array */
- return kzalloc(size, gfp_flags);
+ if (WARN_ON(!mddev->serial_info_pool))
+ return;
+ si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+ wait_event(serial->serial_io_wait,
+ check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
+{
+ struct serial_info *si;
+ unsigned long flags;
+ int found = 0;
+ struct mddev *mddev = rdev->mddev;
+ int idx = sector_to_idx(lo);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+ si; si = raid1_rb_iter_next(si, lo, hi)) {
+ if (si->start == lo && si->last == hi) {
+ raid1_rb_remove(si, &serial->serial_rb);
+ mempool_free(si, mddev->serial_info_pool);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ WARN(1, "The write IO is not recorded for serialization\n");
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
+ wake_up(&serial->serial_io_wait);
+}
+
+/*
+ * For a resync bio, the r1bio pointer can be retrieved from the per-bio
+ * 'struct resync_pages'.
+ */
+static inline struct r1bio *get_resync_r1bio(struct bio *bio)
+{
+ return get_resync_pages(bio)->raid_bio;
}
-static void r1bio_pool_free(void *r1_bio, void *data)
+static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
{
- kfree(r1_bio);
+ int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);
+
+ /* allocate a r1bio with room for raid_disks entries in the bios array */
+ return kzalloc(size, gfp_flags);
}
-#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
+#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
+#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
struct r1bio *r1_bio;
struct bio *bio;
- int i, j;
+ int need_pages;
+ int j;
+ struct resync_pages *rps;
- r1_bio = r1bio_pool_alloc(gfp_flags, pi);
+ r1_bio = r1bio_pool_alloc(gfp_flags, conf);
if (!r1_bio)
return NULL;
+ rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages),
+ gfp_flags);
+ if (!rps)
+ goto out_free_r1bio;
+
/*
* Allocate bios : 1 for reading, n-1 for writing
*/
- for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ for (j = conf->raid_disks * 2; j-- ; ) {
+ bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -115,53 +176,65 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* If this is a user-requested check/repair, allocate
* RESYNC_PAGES for each bio.
*/
- if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- j = pi->raid_disks;
+ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
+ need_pages = conf->raid_disks * 2;
else
- j = 1;
- while(j--) {
+ need_pages = 1;
+ for (j = 0; j < conf->raid_disks * 2; j++) {
+ struct resync_pages *rp = &rps[j];
+
bio = r1_bio->bios[j];
- bio->bi_vcnt = RESYNC_PAGES;
- if (bio_alloc_pages(bio, gfp_flags))
- goto out_free_bio;
- }
- /* If not user-requests, copy the page pointers to all bios */
- if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
- for (i=0; i<RESYNC_PAGES ; i++)
- for (j=1; j<pi->raid_disks; j++)
- r1_bio->bios[j]->bi_io_vec[i].bv_page =
- r1_bio->bios[0]->bi_io_vec[i].bv_page;
+ if (j < need_pages) {
+ if (resync_alloc_pages(rp, gfp_flags))
+ goto out_free_pages;
+ } else {
+ memcpy(rp, &rps[0], sizeof(*rp));
+ resync_get_all_pages(rp);
+ }
+
+ rp->raid_bio = r1_bio;
+ bio->bi_private = rp;
}
r1_bio->master_bio = NULL;
return r1_bio;
+out_free_pages:
+ while (--j >= 0)
+ resync_free_pages(&rps[j]);
+
out_free_bio:
- while (++j < pi->raid_disks)
- bio_put(r1_bio->bios[j]);
- r1bio_pool_free(r1_bio, data);
+ while (++j < conf->raid_disks * 2) {
+ bio_uninit(r1_bio->bios[j]);
+ kfree(r1_bio->bios[j]);
+ }
+ kfree(rps);
+
+out_free_r1bio:
+ rbio_pool_free(r1_bio, data);
return NULL;
}
static void r1buf_pool_free(void *__r1_bio, void *data)
{
- struct pool_info *pi = data;
- int i,j;
+ struct r1conf *conf = data;
+ int i;
struct r1bio *r1bio = __r1_bio;
+ struct resync_pages *rp = NULL;
- for (i = 0; i < RESYNC_PAGES; i++)
- for (j = pi->raid_disks; j-- ;) {
- if (j == 0 ||
- r1bio->bios[j]->bi_io_vec[i].bv_page !=
- r1bio->bios[0]->bi_io_vec[i].bv_page)
- safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
- }
- for (i=0 ; i < pi->raid_disks; i++)
- bio_put(r1bio->bios[i]);
+ for (i = conf->raid_disks * 2; i--; ) {
+ rp = get_resync_pages(r1bio->bios[i]);
+ resync_free_pages(rp);
+ bio_uninit(r1bio->bios[i]);
+ kfree(r1bio->bios[i]);
+ }
- r1bio_pool_free(r1bio, data);
+ /* resync pages array stored in the 1st bio's .bi_private */
+ kfree(rp);
+
+ rbio_pool_free(r1bio, data);
}
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
@@ -187,6 +260,7 @@ static void free_r1bio(struct r1bio *r1_bio)
static void put_buf(struct r1bio *r1_bio)
{
struct r1conf *conf = r1_bio->mddev->private;
+ sector_t sect = r1_bio->sector;
int i;
for (i = 0; i < conf->raid_disks * 2; i++) {
@@ -195,9 +269,9 @@ static void put_buf(struct r1bio *r1_bio)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
}
- mempool_free(r1_bio, conf->r1buf_pool);
+ mempool_free(r1_bio, &conf->r1buf_pool);
- lower_barrier(conf);
+ lower_barrier(conf, sect);
}
static void reschedule_retry(struct r1bio *r1_bio)
@@ -205,10 +279,12 @@ static void reschedule_retry(struct r1bio *r1_bio)
unsigned long flags;
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
+ int idx;
+ idx = sector_to_idx(r1_bio->sector);
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r1_bio->retry_list, &conf->retry_list);
- conf->nr_queued ++;
+ atomic_inc(&conf->nr_queued[idx]);
spin_unlock_irqrestore(&conf->device_lock, flags);
wake_up(&conf->wait_barrier);
@@ -223,45 +299,35 @@ static void reschedule_retry(struct r1bio *r1_bio)
static void call_bio_endio(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
- int done;
- struct r1conf *conf = r1_bio->mddev->private;
-
- if (bio->bi_phys_segments) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- bio->bi_phys_segments--;
- done = (bio->bi_phys_segments == 0);
- spin_unlock_irqrestore(&conf->device_lock, flags);
- } else
- done = 1;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- if (done) {
- bio_endio(bio, 0);
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf);
- }
+ bio->bi_status = BLK_STS_IOERR;
+
+ bio_endio(bio);
}
static void raid_end_bio_io(struct r1bio *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
+ struct r1conf *conf = r1_bio->mddev->private;
+ sector_t sector = r1_bio->sector;
/* if nobody has done the final endio yet, do it now */
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
(bio_data_dir(bio) == WRITE) ? "write" : "read",
- (unsigned long long) bio->bi_sector,
- (unsigned long long) bio->bi_sector +
- bio_sectors(bio) - 1);
+ (unsigned long long) bio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(bio) - 1);
call_bio_endio(r1_bio);
}
+
free_r1bio(r1_bio);
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle. All I/Os, even write-behind writes, are done.
+ */
+ allow_barrier(conf, sector);
}
/*
@@ -294,22 +360,28 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
return mirror;
}
-static void raid1_end_read_request(struct bio *bio, int error)
+static void raid1_end_read_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ int uptodate = !bio->bi_status;
struct r1bio *r1_bio = bio->bi_private;
- int mirror;
struct r1conf *conf = r1_bio->mddev->private;
+ struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
- mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- update_head_pos(mirror, r1_bio);
+ update_head_pos(r1_bio->read_disk, r1_bio);
- if (uptodate)
+ if (uptodate) {
set_bit(R1BIO_Uptodate, &r1_bio->state);
- else {
+ } else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state)) {
+ /* This was a fail-fast read so we definitely
+ * want to retry */
+ ;
+ } else if (!raid1_should_handle_error(bio)) {
+ uptodate = 1;
+ } else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
* Here we redefine "uptodate" to mean "Don't want to retry"
@@ -318,26 +390,22 @@ static void raid1_end_read_request(struct bio *bio, int error)
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+ test_bit(In_sync, &rdev->flags)))
uptodate = 1;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
if (uptodate) {
raid_end_bio_io(r1_bio);
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
} else {
/*
* oops, read error:
*/
- char b[BDEVNAME_SIZE];
- printk_ratelimited(
- KERN_ERR "md/raid1:%s: %s: "
- "rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(conf->mirrors[mirror].rdev->bdev,
- b),
- (unsigned long long)r1_bio->sector);
+ pr_err_ratelimited("md/raid1:%s: %pg: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ rdev->bdev,
+ (unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */
@@ -346,21 +414,18 @@ static void raid1_end_read_request(struct bio *bio, int error)
static void close_write(struct r1bio *r1_bio)
{
+ struct mddev *mddev = r1_bio->mddev;
+
/* it really is the end of this request */
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = r1_bio->behind_page_count;
- while (i--)
- safe_put_page(r1_bio->behind_bvecs[i].bv_page);
- kfree(r1_bio->behind_bvecs);
- r1_bio->behind_bvecs = NULL;
+ bio_free_pages(r1_bio->behind_master_bio);
+ bio_put(r1_bio->behind_master_bio);
+ r1_bio->behind_master_bio = NULL;
}
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- md_write_end(r1_bio->mddev);
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
+ md_write_end(mddev);
}
static void r1_bio_write_done(struct r1bio *r1_bio)
@@ -379,28 +444,46 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
}
}
-static void raid1_end_write_request(struct bio *bio, int error)
+static void raid1_end_write_request(struct bio *bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r1bio *r1_bio = bio->bi_private;
- int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
+ int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
struct r1conf *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
-
- mirror = find_bio_disk(r1_bio, bio);
+ int mirror = find_bio_disk(r1_bio, bio);
+ struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
+ bool ignore_error = !raid1_should_handle_error(bio) ||
+ (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
/*
* 'one mirror IO has finished' event handler:
*/
- if (!uptodate) {
- set_bit(WriteErrorSeen,
- &conf->mirrors[mirror].rdev->flags);
- if (!test_and_set_bit(WantReplacement,
- &conf->mirrors[mirror].rdev->flags))
+ if (bio->bi_status && !ignore_error) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
+ if (test_bit(FailFast, &rdev->flags) &&
+ (bio->bi_opf & MD_FAILFAST) &&
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+ }
+
+ /*
+ * When the device is faulty, it is not necessary to
+ * handle write error.
+ */
+ if (!test_bit(Faulty, &rdev->flags))
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -412,9 +495,6 @@ static void raid1_end_write_request(struct bio *bio, int error)
* to user-side. So if something waits for IO, then it
* will wait for the 'master' bio.
*/
- sector_t first_bad;
- int bad_sectors;
-
r1_bio->bios[mirror] = NULL;
to_put = bio;
/*
@@ -425,21 +505,22 @@ static void raid1_end_write_request(struct bio *bio, int error)
* before rdev->recovery_offset, but for simplicity we don't
* check this here.
*/
- if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+ if (test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(conf->mirrors[mirror].rdev,
- r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !ignore_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
}
if (behind) {
- if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
+ if (test_bit(CollisionCheck, &rdev->flags))
+ remove_serial(rdev, lo, hi);
+ if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
/*
@@ -456,16 +537,15 @@ static void raid1_end_write_request(struct bio *bio, int error)
struct bio *mbio = r1_bio->master_bio;
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- bio_sectors(mbio) - 1);
+ (unsigned long long) mbio->bi_iter.bi_sector,
+ (unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
}
}
- }
+ } else if (rdev->mddev->serialize_policy)
+ remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
- rdev_dec_pending(conf->mirrors[mirror].rdev,
- conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
/*
* Let's see if all mirrored write operations have finished
@@ -477,288 +557,359 @@ static void raid1_end_write_request(struct bio *bio, int error)
bio_put(to_put);
}
-
-/*
- * This routine returns the disk from which the requested read should
- * be done. There is a per-array 'next expected sequential IO' sector
- * number - if this matches on the next IO then we use the last disk.
- * There is also a per-disk 'last know head position' sector that is
- * maintained from IRQ contexts, both the normal and the resync IO
- * completion handlers update this position correctly. If there is no
- * perfect sequential match then we pick the disk whose head is closest.
- *
- * If there are 2 mirrors in the same 2 devices, performance degrades
- * because position is mirror, not device based.
- *
- * The rdev for the device selected will have nr_pending incremented.
- */
-static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
+static sector_t align_to_barrier_unit_end(sector_t start_sector,
+ sector_t sectors)
{
- const sector_t this_sector = r1_bio->sector;
- int sectors;
- int best_good_sectors;
- int best_disk, best_dist_disk, best_pending_disk;
- int has_nonrot_disk;
- int disk;
- sector_t best_dist;
- unsigned int min_pending;
- struct md_rdev *rdev;
- int choose_first;
- int choose_next_idle;
+ sector_t len;
- rcu_read_lock();
+ WARN_ON(sectors == 0);
/*
- * Check if we can balance. We can balance on the whole
- * device if no resync is going on, or below the resync window.
- * We take the first readable disk when above the resync window.
+ * len is the number of sectors from start_sector to end of the
+ * barrier unit which start_sector belongs to.
*/
- retry:
- sectors = r1_bio->sectors;
- best_disk = -1;
- best_dist_disk = -1;
- best_dist = MaxSector;
- best_pending_disk = -1;
- min_pending = UINT_MAX;
- best_good_sectors = 0;
- has_nonrot_disk = 0;
- choose_next_idle = 0;
-
- if (conf->mddev->recovery_cp < MaxSector &&
- (this_sector + sectors >= conf->next_resync))
- choose_first = 1;
- else
- choose_first = 0;
+ len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
+ start_sector;
+
+ if (len > sectors)
+ len = sectors;
+
+ return len;
+}
+
+static void update_read_sectors(struct r1conf *conf, int disk,
+ sector_t this_sector, int len)
+{
+ struct raid1_info *info = &conf->mirrors[disk];
+
+ atomic_inc(&info->rdev->nr_pending);
+ if (info->next_seq_sect != this_sector)
+ info->seq_start = this_sector;
+ info->next_seq_sect = this_sector + len;
+}
+
+static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int len = r1_bio->sectors;
+ int disk;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
- sector_t dist;
- sector_t first_bad;
- int bad_sectors;
- unsigned int pending;
- bool nonrot;
+ struct md_rdev *rdev;
+ int read_len;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
- if (r1_bio->bios[disk] == IO_BLOCKED
- || rdev == NULL
- || test_bit(Unmerged, &rdev->flags)
- || test_bit(Faulty, &rdev->flags))
- continue;
- if (!test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < this_sector + sectors)
+ if (r1_bio->bios[disk] == IO_BLOCKED)
continue;
- if (test_bit(WriteMostly, &rdev->flags)) {
- /* Don't balance among write-mostly, just
- * use the first as a last resort */
- if (best_disk < 0) {
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
- /* Cannot use this */
- continue;
- best_good_sectors = first_bad - this_sector;
- } else
- best_good_sectors = sectors;
- best_disk = disk;
- }
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
+
+ /* choose the first disk even if it has some bad blocks. */
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > 0) {
+ update_read_sectors(conf, disk, this_sector, read_len);
+ *max_sectors = read_len;
+ return disk;
}
- /* This is a reasonable device to use. It might
- * even be best.
- */
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (best_dist < MaxSector)
- /* already have a better device */
- continue;
- if (first_bad <= this_sector) {
- /* cannot read here. If this is the 'primary'
- * device, then we must not read beyond
- * bad_sectors from another device..
- */
- bad_sectors -= (this_sector - first_bad);
- if (choose_first && sectors > bad_sectors)
- sectors = bad_sectors;
- if (best_good_sectors > sectors)
- best_good_sectors = sectors;
-
- } else {
- sector_t good_sectors = first_bad - this_sector;
- if (good_sectors > best_good_sectors) {
- best_good_sectors = good_sectors;
- best_disk = disk;
- }
- if (choose_first)
- break;
- }
+ }
+
+ return -1;
+}
+
+static bool rdev_in_recovery(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ return !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < r1_bio->sector + r1_bio->sectors;
+}
+
+static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int best_disk = -1;
+ int best_len = 0;
+ int disk;
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
+
+ if (r1_bio->bios[disk] == IO_BLOCKED)
continue;
- } else
- best_good_sectors = sectors;
- nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
- has_nonrot_disk |= nonrot;
- pending = atomic_read(&rdev->nr_pending);
- dist = abs(this_sector - conf->mirrors[disk].head_position);
- if (choose_first) {
- best_disk = disk;
- break;
- }
- /* Don't change to another disk for sequential reads */
- if (conf->mirrors[disk].next_seq_sect == this_sector
- || dist == 0) {
- int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
- struct raid1_info *mirror = &conf->mirrors[disk];
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ rdev_in_recovery(rdev, r1_bio) ||
+ test_bit(WriteMostly, &rdev->flags))
+ continue;
+ /* keep track of the disk with the most readable sectors. */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > best_len) {
best_disk = disk;
- /*
- * If buffered sequential IO size exceeds optimal
- * iosize, check if there is idle disk. If yes, choose
- * the idle disk. read_balance could already choose an
- * idle disk before noticing it's a sequential IO in
- * this disk. This doesn't matter because this disk
- * will idle, next time it will be utilized after the
- * first disk has IO size exceeds optimal iosize. In
- * this way, iosize of the first disk will be optimal
- * iosize at least. iosize of the second disk might be
- * small, but not a big deal since when the second disk
- * starts IO, the first disk is likely still busy.
- */
- if (nonrot && opt_iosize > 0 &&
- mirror->seq_start != MaxSector &&
- mirror->next_seq_sect > opt_iosize &&
- mirror->next_seq_sect - opt_iosize >=
- mirror->seq_start) {
- choose_next_idle = 1;
- continue;
- }
- break;
- }
- /* If device is idle, use it */
- if (pending == 0) {
- best_disk = disk;
- break;
+ best_len = read_len;
}
+ }
+
+ if (best_disk != -1) {
+ *max_sectors = best_len;
+ update_read_sectors(conf, best_disk, this_sector, best_len);
+ }
+
+ return best_disk;
+}
+
+static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int bb_disk = -1;
+ int bb_read_len = 0;
+ int disk;
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
- if (choose_next_idle)
+ if (r1_bio->bios[disk] == IO_BLOCKED)
continue;
- if (min_pending > pending) {
- min_pending = pending;
- best_pending_disk = disk;
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ !test_bit(WriteMostly, &rdev->flags) ||
+ rdev_in_recovery(rdev, r1_bio))
+ continue;
+
+ /* there are no bad blocks, we can use this disk */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len == r1_bio->sectors) {
+ *max_sectors = read_len;
+ update_read_sectors(conf, disk, this_sector, read_len);
+ return disk;
}
- if (dist < best_dist) {
- best_dist = dist;
- best_dist_disk = disk;
+ /*
+ * there are partial bad blocks, choose the rdev with largest
+ * read length.
+ */
+ if (read_len > bb_read_len) {
+ bb_disk = disk;
+ bb_read_len = read_len;
}
}
- /*
- * If all disks are rotational, choose the closest disk. If any disk is
- * non-rotational, choose the disk with less pending request even the
- * disk is rotational, which might/might not be optimal for raids with
- * mixed ratation/non-rotational disks depending on workload.
- */
- if (best_disk == -1) {
- if (has_nonrot_disk)
- best_disk = best_pending_disk;
- else
- best_disk = best_dist_disk;
+ if (bb_disk != -1) {
+ *max_sectors = bb_read_len;
+ update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
}
- if (best_disk >= 0) {
- rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
- if (!rdev)
- goto retry;
- atomic_inc(&rdev->nr_pending);
- if (test_bit(Faulty, &rdev->flags)) {
- /* cannot risk returning a device that failed
- * before we inc'ed nr_pending
- */
- rdev_dec_pending(rdev, conf->mddev);
- goto retry;
- }
- sectors = best_good_sectors;
+ return bb_disk;
+}
- if (conf->mirrors[best_disk].next_seq_sect != this_sector)
- conf->mirrors[best_disk].seq_start = this_sector;
+static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
+{
+ /* TODO: address issues with this check and concurrency. */
+ return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
+ conf->mirrors[disk].head_position == r1_bio->sector;
+}
- conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
- }
- rcu_read_unlock();
- *max_sectors = sectors;
+/*
+ * If buffered sequential IO size exceeds the optimal iosize, check if there is an
+ * idle disk. If yes, choose the idle disk.
+ */
+static bool should_choose_next(struct r1conf *conf, int disk)
+{
+ struct raid1_info *mirror = &conf->mirrors[disk];
+ int opt_iosize;
- return best_disk;
+ if (!test_bit(Nonrot, &mirror->rdev->flags))
+ return false;
+
+ opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
+ return opt_iosize > 0 && mirror->seq_start != MaxSector &&
+ mirror->next_seq_sect > opt_iosize &&
+ mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
}
-static int raid1_mergeable_bvec(struct request_queue *q,
- struct bvec_merge_data *bvm,
- struct bio_vec *biovec)
+static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
{
- struct mddev *mddev = q->queuedata;
- struct r1conf *conf = mddev->private;
- sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
- int max = biovec->bv_len;
-
- if (mddev->merge_check_needed) {
- int disk;
- rcu_read_lock();
- for (disk = 0; disk < conf->raid_disks * 2; disk++) {
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[disk].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q =
- bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn) {
- bvm->bi_sector = sector +
- rdev->data_offset;
- bvm->bi_bdev = rdev->bdev;
- max = min(max, q->merge_bvec_fn(
- q, bvm, biovec));
- }
- }
- }
- rcu_read_unlock();
- }
- return max;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ return false;
+
+ if (rdev_in_recovery(rdev, r1_bio))
+ return false;
+ /* don't read from a slow disk unless we have to */
+ if (test_bit(WriteMostly, &rdev->flags))
+ return false;
+
+ /* don't split IO for bad blocks unless we have to */
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
+ return false;
+
+ return true;
}
-int md_raid1_congested(struct mddev *mddev, int bits)
+struct read_balance_ctl {
+ sector_t closest_dist;
+ int closest_dist_disk;
+ int min_pending;
+ int min_pending_disk;
+ int sequential_disk;
+ int readable_disks;
+};
+
+static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
{
- struct r1conf *conf = mddev->private;
- int i, ret = 0;
+ int disk;
+ struct read_balance_ctl ctl = {
+ .closest_dist_disk = -1,
+ .closest_dist = MaxSector,
+ .min_pending_disk = -1,
+ .min_pending = UINT_MAX,
+ .sequential_disk = -1,
+ };
- if ((bits & (1 << BDI_async_congested)) &&
- conf->pending_count >= max_queued_requests)
- return 1;
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ sector_t dist;
+ unsigned int pending;
- rcu_read_lock();
- for (i = 0; i < conf->raid_disks * 2; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev_readable(rdev, r1_bio))
+ continue;
+
+ /* At least two disks to choose from so failfast is OK */
+ if (ctl.readable_disks++ == 1)
+ set_bit(R1BIO_FailFast, &r1_bio->state);
+
+ pending = atomic_read(&rdev->nr_pending);
+ dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
- BUG_ON(!q);
+ /* Don't change to another disk for sequential reads */
+ if (is_sequential(conf, disk, r1_bio)) {
+ if (!should_choose_next(conf, disk))
+ return disk;
- /* Note the '|| 1' - when read_balance prefers
- * non-congested targets, it can be removed
+ /*
+ * Add 'pending' to avoid choosing this disk if
+ * there is another idle disk.
*/
- if ((bits & (1<<BDI_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
- else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ pending++;
+ /*
+ * If there is no other idle disk, this disk
+ * will be chosen.
+ */
+ ctl.sequential_disk = disk;
+ }
+
+ if (ctl.min_pending > pending) {
+ ctl.min_pending = pending;
+ ctl.min_pending_disk = disk;
+ }
+
+ if (ctl.closest_dist > dist) {
+ ctl.closest_dist = dist;
+ ctl.closest_dist_disk = disk;
}
}
- rcu_read_unlock();
- return ret;
+
+ /*
+ * sequential IO size exceeds optimal iosize, however, there is no other
+ * idle disk, so choose the sequential disk.
+ */
+ if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
+ return ctl.sequential_disk;
+
+ /*
+ * If all disks are rotational, choose the closest disk. If any disk is
+ * non-rotational, choose the disk with the fewest pending requests even if
+ * that disk is rotational, which may or may not be optimal for raids with
+ * mixed rotational/non-rotational disks depending on workload.
+ */
+ if (ctl.min_pending_disk != -1 &&
+ (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
+ return ctl.min_pending_disk;
+ else
+ return ctl.closest_dist_disk;
+}
+
+/*
+ * This routine returns the disk from which the requested read should be done.
+ *
+ * 1) If resync is in progress, find the first usable disk and use it even if it
+ * has some bad blocks.
+ *
+ * 2) Now that there is no resync, loop through all disks, skipping slow
+ * disks and disks with bad blocks for now. Only pay attention to the
+ * key disk choice.
+ *
+ * 3) If we've made it this far, now look for disks with bad blocks and choose
+ * the one with the largest number of readable sectors.
+ *
+ * 4) If we are all the way at the end, we have no choice but to use a disk even
+ * if it is write mostly.
+ *
+ * The rdev for the device selected will have nr_pending incremented.
+ */
+static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ int disk;
+
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
+
+ if (raid1_should_read_first(conf->mddev, r1_bio->sector,
+ r1_bio->sectors))
+ return choose_first_rdev(conf, r1_bio, max_sectors);
+
+ disk = choose_best_rdev(conf, r1_bio);
+ if (disk >= 0) {
+ *max_sectors = r1_bio->sectors;
+ update_read_sectors(conf, disk, r1_bio->sector,
+ r1_bio->sectors);
+ return disk;
+ }
+
+ /*
+ * If we are here it means we didn't find a perfectly good disk so
+ * now spend a bit more time trying to find one with the most good
+ * sectors.
+ */
+ disk = choose_bb_rdev(conf, r1_bio, max_sectors);
+ if (disk >= 0)
+ return disk;
+
+ return choose_slow_rdev(conf, r1_bio, max_sectors);
}
-EXPORT_SYMBOL_GPL(md_raid1_congested);
-static int raid1_congested(void *data, int bits)
+static void wake_up_barrier(struct r1conf *conf)
{
- struct mddev *mddev = data;
+ if (wq_has_sleeper(&conf->wait_barrier))
+ wake_up(&conf->wait_barrier);
+}
+
+static void flush_bio_list(struct r1conf *conf, struct bio *bio)
+{
+ /* flush any pending bitmap writes to disk before proceeding w/ I/O */
+ raid1_prepare_flush_writes(conf->mddev);
+ wake_up_barrier(conf);
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
- return mddev_congested(mddev, bits) ||
- md_raid1_congested(mddev, bits);
+ raid1_submit_write(bio);
+ bio = next;
+ cond_resched();
+ }
}
static void flush_pending_writes(struct r1conf *conf)
@@ -769,26 +920,25 @@ static void flush_pending_writes(struct r1conf *conf)
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
+ struct blk_plug plug;
struct bio *bio;
+
bio = bio_list_get(&conf->pending_bio_list);
- conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
- /* flush any pending bitmap writes to
- * disk before proceeding w/ I/O */
- bitmap_unplug(conf->mddev->bitmap);
- wake_up(&conf->wait_barrier);
- while (bio) { /* submit pending writes */
- struct bio *next = bio->bi_next;
- bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
- /* Just ignore it */
- bio_endio(bio, 0);
- else
- generic_make_request(bio);
- bio = next;
- }
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
+ blk_start_plug(&plug);
+ flush_bio_list(conf, bio);
+ blk_finish_plug(&plug);
} else
spin_unlock_irq(&conf->device_lock);
}
@@ -813,147 +963,308 @@ static void flush_pending_writes(struct r1conf *conf)
* background IO calls must call raise_barrier. Once that returns
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
+ *
+ * If resync/recovery is interrupted, returns -EINTR;
+ * Otherwise, returns 0.
*/
-#define RESYNC_DEPTH 32
-
-static void raise_barrier(struct r1conf *conf)
+static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
+ int idx = sector_to_idx(sector_nr);
+
spin_lock_irq(&conf->resync_lock);
/* Wait until no block IO is waiting */
- wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
+ wait_event_lock_irq(conf->wait_barrier,
+ !atomic_read(&conf->nr_waiting[idx]),
conf->resync_lock);
/* block any new IO from starting */
- conf->barrier++;
-
- /* Now wait for all pending IO to complete */
+ atomic_inc(&conf->barrier[idx]);
+ /*
+ * In raise_barrier() we firstly increase conf->barrier[idx] then
+ * check conf->nr_pending[idx]. In _wait_barrier() we firstly
+ * increase conf->nr_pending[idx] then check conf->barrier[idx].
+ * A memory barrier is needed here to make sure conf->nr_pending[idx] won't
+ * be fetched before conf->barrier[idx] is increased. Otherwise
+ * there will be a race between raise_barrier() and _wait_barrier().
+ */
+ smp_mb__after_atomic();
+
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+ * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
+ * exists in the corresponding I/O barrier bucket.
+ * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the maximum
+ * resync count allowed on the current I/O barrier bucket is reached.
+ */
wait_event_lock_irq(conf->wait_barrier,
- !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ (!conf->array_frozen &&
+ !atomic_read(&conf->nr_pending[idx]) &&
+ atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
+ test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
conf->resync_lock);
+ if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ atomic_dec(&conf->barrier[idx]);
+ spin_unlock_irq(&conf->resync_lock);
+ wake_up(&conf->wait_barrier);
+ return -EINTR;
+ }
+
+ atomic_inc(&conf->nr_sync_pending);
spin_unlock_irq(&conf->resync_lock);
+
+ return 0;
}
-static void lower_barrier(struct r1conf *conf)
+static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
- unsigned long flags;
- BUG_ON(conf->barrier <= 0);
- spin_lock_irqsave(&conf->resync_lock, flags);
- conf->barrier--;
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+ int idx = sector_to_idx(sector_nr);
+
+ BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
+
+ atomic_dec(&conf->barrier[idx]);
+ atomic_dec(&conf->nr_sync_pending);
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r1conf *conf)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
+ bool ret = true;
+
+ /*
+ * We need to increase conf->nr_pending[idx] very early here,
+ * then raise_barrier() can be blocked when it waits for
+ * conf->nr_pending[idx] to be 0. Then we can avoid holding
+ * conf->resync_lock when there is no barrier raised in same
+ * barrier unit bucket. Also if the array is frozen, I/O
+ * should be blocked until array is unfrozen.
+ */
+ atomic_inc(&conf->nr_pending[idx]);
+ /*
+ * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
+ * check conf->barrier[idx]. In raise_barrier() we firstly increase
+ * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
+ * barrier is necessary here to make sure conf->barrier[idx] won't be
+ * fetched before conf->nr_pending[idx] is increased. Otherwise there
+ * will be a race between _wait_barrier() and raise_barrier().
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * Don't worry about checking two atomic_t variables at the same
+ * time here. If the array becomes frozen (conf->array_frozen is 1)
+ * while we check conf->barrier[idx], and conf->barrier[idx] is
+ * 0, it is safe to return and let the I/O continue. Because the
+ * array is frozen, all I/O returned here will eventually complete
+ * or be queued, no race will happen. See the code comment in
+ * freeze_array().
+ */
+ if (!READ_ONCE(conf->array_frozen) &&
+ !atomic_read(&conf->barrier[idx]))
+ return ret;
+
+ /*
+ * After holding conf->resync_lock, conf->nr_pending[idx]
+ * should be decreased before waiting for barrier to drop.
+ * Otherwise, we may encounter a race condition because
+ * raise_barrier() might be waiting for conf->nr_pending[idx]
+ * to be 0 at the same time.
+ */
spin_lock_irq(&conf->resync_lock);
- if (conf->barrier) {
- conf->nr_waiting++;
- /* Wait for the barrier to drop.
- * However if there are already pending
- * requests (preventing the barrier from
- * rising completely), and the
- * pre-process bio queue isn't empty,
- * then don't wait, as we need to empty
- * that queue to get the nr_pending
- * count down.
- */
+ atomic_inc(&conf->nr_waiting[idx]);
+ atomic_dec(&conf->nr_pending[idx]);
+ /*
+ * In case freeze_array() is waiting for
+ * get_unqueued_pending() == extra
+ */
+ wake_up_barrier(conf);
+ /* Wait for the barrier in same barrier unit bucket to drop. */
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (conf->nr_pending &&
- current->bio_list &&
- !bio_list_empty(current->bio_list)),
- conf->resync_lock);
- conf->nr_waiting--;
- }
- conf->nr_pending++;
+ !conf->array_frozen &&
+ !atomic_read(&conf->barrier[idx]),
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
+ atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void allow_barrier(struct r1conf *conf)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
- unsigned long flags;
- spin_lock_irqsave(&conf->resync_lock, flags);
- conf->nr_pending--;
- spin_unlock_irqrestore(&conf->resync_lock, flags);
- wake_up(&conf->wait_barrier);
+ int idx = sector_to_idx(sector_nr);
+ bool ret = true;
+
+ /*
+ * Very similar to _wait_barrier(). The difference is, for read
+ * I/O we don't need to wait for sync I/O, but if the whole array
+ * is frozen, the read I/O still has to wait until the array is
+ * unfrozen. Since there is no ordering requirement with
+ * conf->barrier[idx] here, memory barrier is unnecessary as well.
+ */
+ atomic_inc(&conf->nr_pending[idx]);
+
+ if (!READ_ONCE(conf->array_frozen))
+ return ret;
+
+ spin_lock_irq(&conf->resync_lock);
+ atomic_inc(&conf->nr_waiting[idx]);
+ atomic_dec(&conf->nr_pending[idx]);
+ /*
+ * In case freeze_array() is waiting for
+ * get_unqueued_pending() == extra
+ */
+ wake_up_barrier(conf);
+ /* Wait for array to be unfrozen */
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ /* Return false when nowait flag is set */
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen,
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
+ atomic_dec(&conf->nr_waiting[idx]);
+ spin_unlock_irq(&conf->resync_lock);
+ return ret;
+}
+
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
+{
+ int idx = sector_to_idx(sector_nr);
+
+ return _wait_barrier(conf, idx, nowait);
+}
+
+static void _allow_barrier(struct r1conf *conf, int idx)
+{
+ atomic_dec(&conf->nr_pending[idx]);
+ wake_up_barrier(conf);
+}
+
+static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
+{
+ int idx = sector_to_idx(sector_nr);
+
+ _allow_barrier(conf, idx);
+}
+
+/* conf->resync_lock should be held */
+static int get_unqueued_pending(struct r1conf *conf)
+{
+ int idx, ret;
+
+ ret = atomic_read(&conf->nr_sync_pending);
+ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
+ ret += atomic_read(&conf->nr_pending[idx]) -
+ atomic_read(&conf->nr_queued[idx]);
+
+ return ret;
}
static void freeze_array(struct r1conf *conf, int extra)
{
- /* stop syncio and normal IO and wait for everything to
- * go quite.
- * We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+extra
- * This is called in the context of one normal IO request
- * that has failed. Thus any sync request that might be pending
- * will be blocked by nr_pending, and we need to wait for
- * pending IO requests to complete or be queued for re-try.
- * Thus the number queued (nr_queued) plus this request (extra)
- * must match the number of pending IOs (nr_pending) before
- * we continue.
+ /* Stop sync I/O and normal I/O and wait for everything to
+ * go quiet.
+ * This is called in two situations:
+ * 1) management command handlers (reshape, remove disk, quiesce).
+ * 2) one normal I/O request failed.
+
+ * After array_frozen is set to 1, new sync IO will be blocked at
+ * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
+ * or wait_read_barrier(). The flying I/Os will either complete or be
+ * queued. When everything goes quiet, there are only queued I/Os left.
+
+ * Every in-flight I/O contributes to a conf->nr_pending[idx]; idx is the
+ * barrier bucket index which this I/O request hits. When all sync and
+ * normal I/O are queued, sum of all conf->nr_pending[] will match sum
+ * of all conf->nr_queued[]. But normal I/O failure is an exception,
+ * in handle_read_error(), we may call freeze_array() before trying to
+ * fix the read error. In this case, the error read I/O is not queued,
+ * so get_unqueued_pending() == 1.
+ *
+ * Therefore before this function returns, we need to wait until
+ * get_unqueued_pending(conf) becomes equal to extra. For the
+ * normal I/O context, extra is 1; in all other situations, extra is 0.
*/
spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- conf->nr_waiting++;
- wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+extra,
- conf->resync_lock,
- flush_pending_writes(conf));
+ conf->array_frozen = 1;
+ mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
+ wait_event_lock_irq_cmd(
+ conf->wait_barrier,
+ get_unqueued_pending(conf) == extra,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
/* reverse the effect of the freeze */
spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- conf->nr_waiting--;
- wake_up(&conf->wait_barrier);
+ conf->array_frozen = 0;
spin_unlock_irq(&conf->resync_lock);
+ wake_up(&conf->wait_barrier);
}
-
-/* duplicate the data pages for behind I/O
- */
-static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
+ struct bio *bio)
{
- int i;
- struct bio_vec *bvec;
- struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
- GFP_NOIO);
- if (unlikely(!bvecs))
- return;
+ int size = bio->bi_iter.bi_size;
+ unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int i = 0;
+ struct bio *behind_bio = NULL;
- bio_for_each_segment_all(bvec, bio, i) {
- bvecs[i] = *bvec;
- bvecs[i].bv_page = alloc_page(GFP_NOIO);
- if (unlikely(!bvecs[i].bv_page))
- goto do_sync_io;
- memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
- kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
- kunmap(bvecs[i].bv_page);
- kunmap(bvec->bv_page);
- }
- r1_bio->behind_bvecs = bvecs;
- r1_bio->behind_page_count = bio->bi_vcnt;
+ behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
+ &r1_bio->mddev->bio_set);
+
+ /* discard op, we don't support writezero/writesame yet */
+ if (!bio_has_data(bio)) {
+ behind_bio->bi_iter.bi_size = size;
+ goto skip_copy;
+ }
+
+ while (i < vcnt && size) {
+ struct page *page;
+ int len = min_t(int, PAGE_SIZE, size);
+
+ page = alloc_page(GFP_NOIO);
+ if (unlikely(!page))
+ goto free_pages;
+
+ if (!bio_add_page(behind_bio, page, len, 0)) {
+ put_page(page);
+ goto free_pages;
+ }
+
+ size -= len;
+ i++;
+ }
+
+ bio_copy_data(behind_bio, bio);
+skip_copy:
+ r1_bio->behind_master_bio = behind_bio;
set_bit(R1BIO_BehindIO, &r1_bio->state);
+
return;
-do_sync_io:
- for (i = 0; i < bio->bi_vcnt; i++)
- if (bvecs[i].bv_page)
- put_page(bvecs[i].bv_page);
- kfree(bvecs);
- pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+free_pages:
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
+ bio_free_pages(behind_bio);
+ bio_put(behind_bio);
}
-struct raid1_plug_cb {
- struct blk_plug_cb cb;
- struct bio_list pending;
- int pending_cnt;
-};
-
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -962,12 +1273,11 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
struct r1conf *conf = mddev->private;
struct bio *bio;
- if (from_schedule || current->bio_list) {
+ if (from_schedule) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
- conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -975,181 +1285,245 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
- bitmap_unplug(mddev->bitmap);
- wake_up(&conf->wait_barrier);
-
- while (bio) { /* submit pending writes */
- struct bio *next = bio->bi_next;
- bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
- /* Just ignore it */
- bio_endio(bio, 0);
- else
- generic_make_request(bio);
- bio = next;
- }
+ flush_bio_list(conf, bio);
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
+{
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio);
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector;
+}
+
+static inline struct r1bio *
+alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
struct r1conf *conf = mddev->private;
- struct raid1_info *mirror;
struct r1bio *r1_bio;
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
+ init_r1bio(r1_bio, mddev, bio);
+ return r1_bio;
+}
+
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ int max_read_sectors, struct r1bio *r1_bio)
+{
+ struct r1conf *conf = mddev->private;
+ struct raid1_info *mirror;
struct bio *read_bio;
- int i, disks;
- struct bitmap *bitmap;
- unsigned long flags;
- const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
- const unsigned long do_discard = (bio->bi_rw
- & (REQ_DISCARD | REQ_SECURE));
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
- struct md_rdev *blocked_rdev;
- struct blk_plug_cb *cb;
- struct raid1_plug_cb *plug = NULL;
- int first_clone;
- int sectors_handled;
int max_sectors;
+ int rdisk;
+ bool r1bio_existed = !!r1_bio;
/*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
+ * If r1_bio is set, we are blocking the raid1d thread
+ * so there is a tiny risk of deadlock. So ask for
+ * emergency memory if needed.
*/
+ gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
- md_write_start(mddev, bio); /* wait on superblock update early */
-
- if (bio_data_dir(bio) == WRITE &&
- bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_sector < mddev->suspend_hi) {
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
- */
- DEFINE_WAIT(w);
- for (;;) {
- flush_signals(current);
- prepare_to_wait(&conf->wait_barrier,
- &w, TASK_INTERRUPTIBLE);
- if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_sector >= mddev->suspend_hi)
- break;
- schedule();
- }
- finish_wait(&conf->wait_barrier, &w);
+ /*
+ * Still need barrier for READ in case that whole
+ * array is frozen.
+ */
+ if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
}
- wait_barrier(conf);
-
- bitmap = mddev->bitmap;
+ if (!r1_bio)
+ r1_bio = alloc_r1bio(mddev, bio);
+ else
+ init_r1bio(r1_bio, mddev, bio);
+ r1_bio->sectors = max_read_sectors;
/*
- * make_request() can abort the operation when READA is being
+ * make_request() can abort the operation when read-ahead is being
* used and no empty request is available.
- *
*/
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
+ if (rdisk < 0) {
+ /* couldn't find anywhere to read from */
+ if (r1bio_existed)
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev),
+ conf->mirrors[r1_bio->read_disk].rdev->bdev,
+ r1_bio->sector);
+ raid_end_bio_io(r1_bio);
+ return;
+ }
+ mirror = conf->mirrors + rdisk;
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio);
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r1_bio and no locking
- * will be needed when requests complete. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+ if (r1bio_existed)
+ pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %pg\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ mirror->rdev->bdev);
- if (rw == READ) {
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ md_bitmap_enabled(mddev, false)) {
/*
- * read balancing logic:
+ * Reading from a write-mostly device must take care not to
+ * over-take any writes that are 'behind'
*/
- int rdisk;
-
-read_again:
- rdisk = read_balance(conf, r1_bio, &max_sectors);
+ mddev_add_trace_msg(mddev, "raid1 wait behind writes");
+ mddev->bitmap_ops->wait_behind_writes(mddev);
+ }
- if (rdisk < 0) {
- /* couldn't find anywhere to read from */
- raid_end_bio_io(r1_bio);
- return;
+ if (max_sectors < bio_sectors(bio)) {
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
+ goto err_handle;
}
- mirror = conf->mirrors + rdisk;
- if (test_bit(WriteMostly, &mirror->rdev->flags) &&
- bitmap) {
- /* Reading from a write-mostly device must
- * take care not to over-take any writes
- * that are 'behind'
- */
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
- r1_bio->read_disk = rdisk;
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = max_sectors;
+ }
+
+ r1_bio->read_disk = rdisk;
+ if (!r1bio_existed) {
+ md_account_bio(mddev, &bio);
+ r1_bio->master_bio = bio;
+ }
+ read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+ &mddev->bio_set);
+ read_bio->bi_opf &= ~REQ_NOWAIT;
+ r1_bio->bios[rdisk] = read_bio;
+
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
+ read_bio->bi_end_io = raid1_end_read_request;
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r1_bio;
+ mddev_trace_remap(mddev, read_bio, r1_bio->sector);
+ submit_bio_noacct(read_bio);
+ return;
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
- max_sectors);
+err_handle:
+ atomic_dec(&mirror->rdev->nr_pending);
+ raid_end_bio_io(r1_bio);
+}
- r1_bio->bios[rdisk] = read_bio;
+static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ int disks = conf->raid_disks * 2;
+ int i;
- read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
- read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | do_sync;
- read_bio->bi_private = r1_bio;
+retry:
+ for (i = 0; i < disks; i++) {
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
- if (max_sectors < r1_bio->sectors) {
- /* could not read all from this device, so we will
- * need another r1_bio.
- */
+ if (!rdev)
+ continue;
- sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_sector);
- r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __make_request
- * and subsequent mempool_alloc might block waiting
- * for it. So hand bio over to raid1d.
- */
- reschedule_retry(r1_bio);
+ /* don't write here until the bad block is acknowledged */
+ if (test_bit(WriteErrorSeen, &rdev->flags) &&
+ rdev_has_badblock(rdev, bio->bi_iter.bi_sector,
+ bio_sectors(bio)) < 0)
+ set_bit(BlockedBadBlocks, &rdev->flags);
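+ /*
+ * A negative return from rdev_has_badblock() means the range overlaps
+ * bad blocks that have not yet been acknowledged in the metadata;
+ * BlockedBadBlocks keeps rdev_blocked() true below until md has
+ * recorded them, so the write waits instead of racing with that update.
+ */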
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ if (rdev_blocked(rdev)) {
+ if (bio->bi_opf & REQ_NOWAIT)
+ return false;
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio) - sectors_handled;
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
+ mddev_add_trace_msg(rdev->mddev, "raid1 wait rdev %d blocked",
+ rdev->raid_disk);
+ atomic_inc(&rdev->nr_pending);
+ md_wait_for_blocked_rdev(rdev, rdev->mddev);
+ goto retry;
+ }
+ }
+
+ return true;
+}
+
+static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
+ struct bio *bio)
+{
+ unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
+ struct md_bitmap_stats stats;
+ int err;
+
+ /* behind writes rely on the bitmap, see bitmap_operations */
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
return;
+
+ /* Don't do behind IO if reader is waiting, or there are too many. */
+ if (!stats.behind_wait && stats.behind_writes < max_write_behind)
+ alloc_behind_master_bio(r1_bio, bio);
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
+
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ int max_write_sectors)
+{
+ struct r1conf *conf = mddev->private;
+ struct r1bio *r1_bio;
+ int i, disks, k;
+ unsigned long flags;
+ int first_clone;
+ int max_sectors;
+ bool write_behind = false;
+ bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
+
+ if (mddev_is_clustered(mddev) &&
+ mddev->cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))) {
+
+ DEFINE_WAIT(w);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
+ for (;;) {
+ prepare_to_wait(&conf->wait_barrier,
+ &w, TASK_IDLE);
+ if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))
+ break;
+ schedule();
+ }
+ finish_wait(&conf->wait_barrier, &w);
}
/*
- * WRITE:
+ * Register the new request and wait if the reconstruction
+ * thread has put up a barrier for new requests.
+ * Continue immediately if no resync is active currently.
*/
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
+ if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
+
+ if (!wait_blocked_rdev(mddev, bio)) {
+ bio_wouldblock_error(bio);
+ return;
}
+
+ r1_bio = alloc_r1bio(mddev, bio);
+ r1_bio->sectors = max_write_sectors;
+
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1162,41 +1536,30 @@ read_again:
*/
disks = conf->raid_disks * 2;
- retry_write:
- blocked_rdev = NULL;
- rcu_read_lock();
max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+ /*
+ * The write-behind IO is only attempted on drives marked as
+ * write-mostly, which means we may need to allocate a write-behind
+ * bio later.
+ */
+ if (!is_discard && rdev && test_bit(WriteMostly, &rdev->flags))
+ write_behind = true;
+
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)
- || test_bit(Unmerged, &rdev->flags)) {
- if (i < conf->raid_disks)
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
- }
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, r1_bio->sector,
- max_sectors,
+ is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
- if (is_bad < 0) {
- /* mustn't write here until the bad block is
- * acknowledged*/
- set_bit(BlockedBadBlocks, &rdev->flags);
- blocked_rdev = rdev;
- break;
- }
if (is_bad && first_bad <= r1_bio->sector) {
/* Cannot write here at all */
bad_sectors -= (r1_bio->sector - first_bad);
@@ -1206,243 +1569,252 @@ read_again:
*/
max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- /* We don't set R1BIO_Degraded as that
- * only applies if the disk is
- * missing, so it might be re-added,
- * and we want to know to recover this
- * chunk.
- * In this case the device is here,
- * and the fact that this chunk is not
- * in-sync is recorded in the bad
- * block log
- */
continue;
}
if (is_bad) {
- int good_sectors = first_bad - r1_bio->sector;
+ int good_sectors;
+
+ /*
+ * We cannot atomically write this, so just
+ * error in that case. It could be possible to
+ * atomically write other mirrors, but the
+ * complexity of supporting that is not worth
+ * the benefit.
+ */
+ if (bio->bi_opf & REQ_ATOMIC)
+ goto err_handle;
+
+ good_sectors = first_bad - r1_bio->sector;
if (good_sectors < max_sectors)
max_sectors = good_sectors;
}
}
r1_bio->bios[i] = bio;
}
- rcu_read_unlock();
-
- if (unlikely(blocked_rdev)) {
- /* Wait for this device to become unblocked */
- int j;
- for (j = 0; j < i; j++)
- if (r1_bio->bios[j])
- rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- r1_bio->state = 0;
- allow_barrier(conf);
- md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
- goto retry_write;
- }
+ /*
+ * When using a bitmap, we may call alloc_behind_master_bio below.
+ * alloc_behind_master_bio allocates a copy of the data payload a page
+ * at a time and thus needs a new bio that can fit the whole payload
+ * of this bio in page-sized chunks.
+ */
+ if (write_behind && mddev->bitmap)
+ max_sectors = min_t(int, max_sectors,
+ BIO_MAX_VECS * (PAGE_SIZE >> 9));
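+ /*
+ * For example, with 4 KiB pages and BIO_MAX_VECS of 256 this clamps a
+ * behind write to 256 * 8 = 2048 sectors (1 MiB), i.e. what one bio
+ * can carry as full pages.
+ */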
+ if (max_sectors < bio_sectors(bio)) {
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
+ goto err_handle;
+ }
- if (max_sectors < r1_bio->sectors) {
- /* We are splitting this write into multiple parts, so
- * we need to prepare for allocating another r1_bio.
- */
+ r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
}
- sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+ md_account_bio(mddev, &bio);
+ r1_bio->master_bio = bio;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
first_clone = 1;
+
for (i = 0; i < disks; i++) {
- struct bio *mbio;
+ struct bio *mbio = NULL;
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (!r1_bio->bios[i])
continue;
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
-
if (first_clone) {
- /* do behind I/O ?
- * Not if there are too many, or cannot
- * allocate memory, or a reader on WriteMostly
- * is waiting for behind writes to flush */
- if (bitmap &&
- (atomic_read(&bitmap->behind_writes)
- < mddev->bitmap_info.max_write_behind) &&
- !waitqueue_active(&bitmap->behind_wait))
- alloc_behind_pages(mbio, r1_bio);
-
- bitmap_startwrite(bitmap, r1_bio->sector,
- r1_bio->sectors,
- test_bit(R1BIO_BehindIO,
- &r1_bio->state));
+ if (write_behind)
+ raid1_start_write_behind(mddev, r1_bio, bio);
first_clone = 0;
}
- if (r1_bio->behind_bvecs) {
- struct bio_vec *bvec;
- int j;
- /*
- * We trimmed the bio, so _all is legit
- */
- bio_for_each_segment_all(bvec, mbio, j)
- bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
- if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+ if (r1_bio->behind_master_bio) {
+ mbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
+ if (test_bit(CollisionCheck, &rdev->flags))
+ wait_for_serialization(rdev, r1_bio);
+ if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
+ } else {
+ mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+ &mddev->bio_set);
+
+ if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
}
+ mbio->bi_opf &= ~REQ_NOWAIT;
r1_bio->bios[i] = mbio;
- mbio->bi_sector = (r1_bio->sector +
- conf->mirrors[i].rdev->data_offset);
- mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw =
- WRITE | do_flush_fua | do_sync | do_discard | do_same;
+ if (test_bit(FailFast, &rdev->flags) &&
+ !test_bit(WriteMostly, &rdev->flags) &&
+ conf->raid_disks - mddev->degraded > 1)
+ mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
-
- cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid1_plug_cb, cb);
- else
- plug = NULL;
- spin_lock_irqsave(&conf->device_lock, flags);
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
- } else {
+ mddev_trace_remap(mddev, mbio, r1_bio->sector);
+ /* flush_pending_writes() needs access to the rdev so...*/
+ mbio->bi_bdev = (void *)rdev;
+ if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
+ spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- }
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!plug)
+ spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
- }
- /* Mustn't call r1_bio_write_done before this next test,
- * as it could result in the bio being freed.
- */
- if (sectors_handled < bio_sectors(bio)) {
- r1_bio_write_done(r1_bio);
- /* We need another r1_bio. It has already been counted
- * in bio->bi_phys_segments
- */
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio) - sectors_handled;
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_sector + sectors_handled;
- goto retry_write;
+ }
}
r1_bio_write_done(r1_bio);
/* In case raid1d snuck in to freeze_array */
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
+ return;
+err_handle:
+ for (k = 0; k < i; k++) {
+ if (r1_bio->bios[k]) {
+ rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+ r1_bio->bios[k] = NULL;
+ }
+ }
+
+ raid_end_bio_io(r1_bio);
}
-static void status(struct seq_file *seq, struct mddev *mddev)
+static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+ sector_t sectors;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
+ return true;
+
+ /*
+ * There is a limit to the maximum size, but
+ * the read/write handler might find a lower limit
+ * due to bad blocks. To avoid multiple splits,
+ * we pass the maximum number of sectors down
+ * and let the lower level perform the split.
+ */
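+ /*
+ * For example, with the 64 MiB (1 << 17 sector) barrier units used by
+ * raid1, a 200-sector bio starting at sector 131000 is limited to the
+ * 72 sectors left before the boundary at 131072; the remainder is
+ * handled by a split lower down.
+ */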
+ sectors = align_to_barrier_unit_end(
+ bio->bi_iter.bi_sector, bio_sectors(bio));
+
+ if (bio_data_dir(bio) == READ)
+ raid1_read_request(mddev, bio, sectors, NULL);
+ else {
+ md_write_start(mddev, bio);
+ raid1_write_request(mddev, bio, sectors);
+ }
+ return true;
+}
+
+static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
struct r1conf *conf = mddev->private;
int i;
+ lockdep_assert_held(&mddev->lock);
+
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
+
seq_printf(seq, "%s",
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
- rcu_read_unlock();
seq_printf(seq, "]");
}
-
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+/**
+ * raid1_error() - RAID1 error handler.
+ * @mddev: affected md device.
+ * @rdev: member device to fail.
+ *
+ * The routine acknowledges &rdev failure and determines new @mddev state.
+ * If it failed, then:
+ * - &MD_BROKEN flag is set in &mddev->flags.
+ * - recovery is disabled.
+ * Otherwise, it must be degraded:
+ * - recovery is interrupted.
+ * - &mddev->degraded is bumped.
+ *
+ * @rdev is marked as &Faulty excluding case when array is failed and
+ * &mddev->fail_last_dev is off.
+ */
+static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
- char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
+ unsigned long flags;
- /*
- * If it is not operational, then we have already marked it as dead
- * else if it is the last working disks, ignore the error, let the
- * next level up know.
- * else mark the drive as failed
- */
- if (test_bit(In_sync, &rdev->flags)
- && (conf->raid_disks - mddev->degraded) == 1) {
- /*
- * Don't fail the drive, act as though we were just a
- * normal single drive.
- * However don't try a recovery from this drive as
- * it is very likely to fail.
- */
- conf->recovery_disabled = mddev->recovery_disabled;
- return;
+ spin_lock_irqsave(&conf->device_lock, flags);
+
+ if (test_bit(In_sync, &rdev->flags) &&
+ (conf->raid_disks - mddev->degraded) == 1) {
+ set_bit(MD_BROKEN, &mddev->flags);
+
+ if (!mddev->fail_last_dev) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
}
set_bit(Blocked, &rdev->flags);
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
+ if (test_and_clear_bit(In_sync, &rdev->flags))
mddev->degraded++;
- set_bit(Faulty, &rdev->flags);
- spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery is running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- } else
- set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT
- "md/raid1:%s: Disk failure on %s, disabling device.\n"
- "md/raid1:%s: Operation continuing on %d devices.\n",
- mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ /*
+ * if recovery is running, make sure it aborts.
+ */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+ pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), rdev->bdev,
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
int i;
- printk(KERN_DEBUG "RAID1 conf printout:\n");
+ pr_debug("RAID1 conf printout:\n");
if (!conf) {
- printk(KERN_DEBUG "(!conf)\n");
+ pr_debug("(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ conf->raid_disks);
- rcu_read_lock();
+ lockdep_assert_held(&conf->mddev->reconfig_mutex);
for (i = 0; i < conf->raid_disks; i++) {
- char b[BDEVNAME_SIZE];
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (rdev)
- printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &rdev->flags),
- !test_bit(Faulty, &rdev->flags),
- bdevname(rdev->bdev,b));
+ pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ rdev->bdev);
}
- rcu_read_unlock();
}
static void close_sync(struct r1conf *conf)
{
- wait_barrier(conf);
- allow_barrier(conf);
+ int idx;
+
+ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
+ _wait_barrier(conf, idx, false);
+ _allow_barrier(conf, idx);
+ }
- mempool_destroy(conf->r1buf_pool);
- conf->r1buf_pool = NULL;
+ mempool_exit(&conf->r1buf_pool);
}
static int raid1_spare_active(struct mddev *mddev)
@@ -1453,14 +1825,18 @@ static int raid1_spare_active(struct mddev *mddev)
unsigned long flags;
/*
- * Find all failed disks within the RAID1 configuration
+ * Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
+ * device_lock used to avoid races with raid1_end_read_request
+ * which expects 'In_sync' flags and ->degraded to be consistent.
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
if (repl
+ && !test_bit(Candidate, &repl->flags)
&& repl->recovery_offset == MaxSector
&& !test_bit(Faulty, &repl->flags)
&& !test_and_set_bit(In_sync, &repl->flags)) {
@@ -1479,13 +1855,13 @@ static int raid1_spare_active(struct mddev *mddev)
}
}
if (rdev
+ && rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded -= count;
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -1493,16 +1869,60 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
+static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
+ bool replacement)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+
+ if (replacement)
+ info += conf->raid_disks;
+
+ if (info->rdev)
+ return false;
+
+ if (bdev_nonrot(rdev->bdev)) {
+ set_bit(Nonrot, &rdev->flags);
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
+ }
+
+ rdev->raid_disk = disk;
+ info->head_position = 0;
+ info->seq_start = MaxSector;
+ WRITE_ONCE(info->rdev, rdev);
+
+ return true;
+}
+
+static bool raid1_remove_conf(struct r1conf *conf, int disk)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+ struct md_rdev *rdev = info->rdev;
+
+ if (!rdev || test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending))
+ return false;
+
+ /* Only remove non-faulty devices if recovery is not possible. */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ rdev->mddev->recovery_disabled != conf->recovery_disabled &&
+ rdev->mddev->degraded < conf->raid_disks)
+ return false;
+
+ if (test_and_clear_bit(Nonrot, &rdev->flags))
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
+
+ WRITE_ONCE(info->rdev, NULL);
+ return true;
+}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
int err = -EEXIST;
- int mirror = 0;
+ int mirror = 0, repl_slot = -1;
struct raid1_info *p;
int first = 0;
int last = conf->raid_disks - 1;
- struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -1510,58 +1930,45 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (q->merge_bvec_fn) {
- set_bit(Unmerged, &rdev->flags);
- mddev->merge_check_needed = 1;
- }
+ /*
+ * find the disk ... but prefer rdev->saved_raid_disk
+ * if possible.
+ */
+ if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->raid_disks &&
+ conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ first = last = rdev->saved_raid_disk;
for (mirror = first; mirror <= last; mirror++) {
- p = conf->mirrors+mirror;
+ p = conf->mirrors + mirror;
if (!p->rdev) {
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
- p->head_position = 0;
- rdev->raid_disk = mirror;
- err = 0;
+ raid1_add_conf(conf, rdev, mirror, false);
/* As all devices are equivalent, we don't need a full recovery
* if this was recently any drive of the array
*/
if (rdev->saved_raid_disk < 0)
conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
break;
}
if (test_bit(WantReplacement, &p->rdev->flags) &&
- p[conf->raid_disks].rdev == NULL) {
- /* Add this device as a replacement */
- clear_bit(In_sync, &rdev->flags);
- set_bit(Replacement, &rdev->flags);
- rdev->raid_disk = mirror;
- err = 0;
- conf->fullsync = 1;
- rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
- break;
- }
+ p[conf->raid_disks].rdev == NULL && repl_slot < 0)
+ repl_slot = mirror;
}
- if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
- /* Some requests might not have seen this new
- * merge_bvec_fn. We must wait for them to complete
- * before merging the device fully.
- * First we make sure any code which has tested
- * our function has submitted the request, then
- * we wait for all outstanding requests to complete.
- */
- synchronize_sched();
- freeze_array(conf, 0);
- unfreeze_array(conf);
- clear_bit(Unmerged, &rdev->flags);
+
+ if (err && repl_slot >= 0) {
+ /* Add this device as a replacement */
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ raid1_add_conf(conf, rdev, repl_slot, true);
+ err = 0;
+ conf->fullsync = 1;
}
- md_integrity_add_rdev(rdev, mddev);
- if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
print_conf(conf);
return err;
}
@@ -1573,33 +1980,23 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int number = rdev->raid_disk;
struct raid1_info *p = conf->mirrors + number;
- if (rdev != p->rdev)
- p = conf->mirrors + conf->raid_disks + number;
+ if (unlikely(number >= conf->raid_disks))
+ goto abort;
+
+ if (rdev != p->rdev) {
+ number += conf->raid_disks;
+ p = conf->mirrors + number;
+ }
print_conf(conf);
if (rdev == p->rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
+ if (!raid1_remove_conf(conf, number)) {
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices if recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- mddev->degraded < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- } else if (conf->mirrors[conf->raid_disks + number].rdev) {
+
+ if (number < conf->raid_disks &&
+ conf->mirrors[conf->raid_disks + number].rdev) {
/* We just removed a device that is being replaced.
* Move down the replacement. We drain all IO before
* doing this to avoid confusion.
@@ -1607,13 +2004,24 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct md_rdev *repl =
conf->mirrors[conf->raid_disks + number].rdev;
freeze_array(conf, 0);
+ if (atomic_read(&repl->nr_pending)) {
+ /* Some queued IO on retry_list still holds
+ * repl, so we cannot set the replacement to
+ * NULL here; that could lead to an rdev NULL
+ * pointer dereference in sync_request_write
+ * and handle_write_finished.
+ */
+ err = -EBUSY;
+ unfreeze_array(conf);
+ goto abort;
+ }
clear_bit(Replacement, &repl->flags);
- p->rdev = repl;
+ WRITE_ONCE(p->rdev, repl);
conf->mirrors[conf->raid_disks + number].rdev = NULL;
unfreeze_array(conf);
- clear_bit(WantReplacement, &rdev->flags);
- } else
- clear_bit(WantReplacement, &rdev->flags);
+ }
+
+ clear_bit(WantReplacement, &rdev->flags);
err = md_integrity_register(mddev);
}
abort:
@@ -1622,10 +2030,9 @@ abort:
return err;
}
-
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
{
- struct r1bio *r1_bio = bio->bi_private;
+ struct r1bio *r1_bio = get_resync_r1bio(bio);
update_head_pos(r1_bio->read_disk, r1_bio);
@@ -1634,56 +2041,33 @@ static void end_sync_read(struct bio *bio, int error)
* or re-read if the read failed.
* We don't do much here, just schedule handling by raid1d
*/
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (!bio->bi_status)
set_bit(R1BIO_Uptodate, &r1_bio->state);
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
}
-static void end_sync_write(struct bio *bio, int error)
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct r1bio *r1_bio = bio->bi_private;
- struct mddev *mddev = r1_bio->mddev;
- struct r1conf *conf = mddev->private;
- int mirror=0;
- sector_t first_bad;
- int bad_sectors;
-
- mirror = find_bio_disk(r1_bio, bio);
+ sector_t sync_blocks = 0;
+ sector_t s = r1_bio->sector;
+ long sectors_to_go = r1_bio->sectors;
- if (!uptodate) {
- sector_t sync_blocks = 0;
- sector_t s = r1_bio->sector;
- long sectors_to_go = r1_bio->sectors;
- /* make sure these bits doesn't get cleared. */
- do {
- bitmap_end_sync(mddev->bitmap, s,
- &sync_blocks, 1);
- s += sync_blocks;
- sectors_to_go -= sync_blocks;
- } while (sectors_to_go > 0);
- set_bit(WriteErrorSeen,
- &conf->mirrors[mirror].rdev->flags);
- if (!test_and_set_bit(WantReplacement,
- &conf->mirrors[mirror].rdev->flags))
- set_bit(MD_RECOVERY_NEEDED, &
- mddev->recovery);
- set_bit(R1BIO_WriteError, &r1_bio->state);
- } else if (is_badblock(conf->mirrors[mirror].rdev,
- r1_bio->sector,
- r1_bio->sectors,
- &first_bad, &bad_sectors) &&
- !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
- r1_bio->sector,
- r1_bio->sectors,
- &first_bad, &bad_sectors)
- )
- set_bit(R1BIO_MadeGood, &r1_bio->state);
+ /* make sure these bits don't get cleared. */
+ do {
+ md_bitmap_end_sync(mddev, s, &sync_blocks);
+ s += sync_blocks;
+ sectors_to_go -= sync_blocks;
+ } while (sectors_to_go > 0);
+}
+static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
+{
if (atomic_dec_and_test(&r1_bio->remaining)) {
+ struct mddev *mddev = r1_bio->mddev;
int s = r1_bio->sectors;
+
if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
test_bit(R1BIO_WriteError, &r1_bio->state))
reschedule_retry(r1_bio);
@@ -1694,13 +2078,37 @@ static void end_sync_write(struct bio *bio, int error)
}
}
+static void end_sync_write(struct bio *bio)
+{
+ int uptodate = !bio->bi_status;
+ struct r1bio *r1_bio = get_resync_r1bio(bio);
+ struct mddev *mddev = r1_bio->mddev;
+ struct r1conf *conf = mddev->private;
+ struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
+
+ if (!uptodate) {
+ abort_sync_write(mddev, r1_bio);
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ mddev->recovery);
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+ r1_bio->sector, r1_bio->sectors)) {
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
+
+ put_sync_write_buf(r1_bio, uptodate);
+}
+
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, blk_opf_t rw)
{
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (rw == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
&rdev->flags))
@@ -1729,15 +2137,28 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
struct bio *bio = r1_bio->bios[r1_bio->read_disk];
+ struct page **pages = get_resync_pages(bio)->pages;
sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors;
int idx = 0;
+ struct md_rdev *rdev;
+
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (test_bit(FailFast, &rdev->flags)) {
+ /* Don't try recovering from here - just fail it
+ * ... unless it is the last working device of course */
+ md_error(mddev, rdev);
+ if (test_bit(Faulty, &rdev->flags))
+ /* Don't try to read from here, but make sure
+ * put_buf does its thing
+ */
+ bio->bi_end_io = end_sync_write;
+ }
while(sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
- struct md_rdev *rdev;
int start;
if (s > (PAGE_SIZE>>9))
@@ -1750,8 +2171,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
*/
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9,
- bio->bi_io_vec[idx].bv_page,
- READ, false)) {
+ pages[idx],
+ REQ_OP_READ, false)) {
success = 1;
break;
}
@@ -1762,18 +2183,15 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
} while (!success && d != r1_bio->read_disk);
if (!success) {
- char b[BDEVNAME_SIZE];
int abort = 0;
/* Cannot read from anywhere, this block is lost.
* Record a bad block on each device. If that doesn't
* work just disable and interrupt the recovery.
* Don't fail devices as that won't really help.
*/
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
- " for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
- (unsigned long long)r1_bio->sector);
+ pr_crit_ratelimited("md/raid1:%s: %pg: unrecoverable I/O read error for block %llu\n",
+ mdname(mddev), bio->bi_bdev,
+ (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -1781,14 +2199,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (!rdev_set_badblocks(rdev, sect, s, 0))
abort = 1;
}
- if (abort) {
- conf->recovery_disabled =
- mddev->recovery_disabled;
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
+ if (abort)
return 0;
- }
+
/* Try next page */
sectors -= s;
sect += s;
@@ -1806,8 +2219,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
continue;
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
- bio->bi_io_vec[idx].bv_page,
- WRITE) == 0) {
+ pages[idx],
+ REQ_OP_WRITE) == 0) {
r1_bio->bios[d]->bi_end_io = NULL;
rdev_dec_pending(rdev, mddev);
}
@@ -1821,8 +2234,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
continue;
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
- bio->bi_io_vec[idx].bv_page,
- READ) != 0)
+ pages[idx],
+ REQ_OP_READ) != 0)
atomic_add(s, &rdev->corrected_errors);
}
sectors -= s;
@@ -1830,11 +2243,11 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
idx ++;
}
set_bit(R1BIO_Uptodate, &r1_bio->state);
- set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_status = 0;
return 1;
}
-static int process_checks(struct r1bio *r1_bio)
+static void process_checks(struct r1bio *r1_bio)
{
/* We have read all readable devices. If we haven't
* got the block, then there is no hope left.
@@ -1849,32 +2262,59 @@ static int process_checks(struct r1bio *r1_bio)
int i;
int vcnt;
+ /* Fix variable parts of all bios */
+ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
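+ /* e.g. with 4 KiB pages, 100 sectors round up to (100 + 7) >> 3 = 13 pages */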
+ for (i = 0; i < conf->raid_disks * 2; i++) {
+ blk_status_t status;
+ struct bio *b = r1_bio->bios[i];
+ struct resync_pages *rp = get_resync_pages(b);
+ if (b->bi_end_io != end_sync_read)
+ continue;
+ /* fixup the bio for reuse, but preserve errno */
+ status = b->bi_status;
+ bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
+ b->bi_status = status;
+ b->bi_iter.bi_sector = r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset;
+ b->bi_end_io = end_sync_read;
+ rp->raid_bio = r1_bio;
+ b->bi_private = rp;
+
+ /* initialize bvec table again */
+ md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
+ }
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
- test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+ !r1_bio->bios[primary]->bi_status) {
r1_bio->bios[primary]->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
break;
}
r1_bio->read_disk = primary;
- vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
+ int j = 0;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
- int size;
+ blk_status_t status = sbio->bi_status;
+ struct page **ppages = get_resync_pages(pbio)->pages;
+ struct page **spages = get_resync_pages(sbio)->pages;
+ struct bio_vec *bi;
+ int page_len[RESYNC_PAGES] = { 0 };
+ struct bvec_iter_all iter_all;
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the error value */
+ sbio->bi_status = 0;
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ bio_for_each_segment_all(bi, sbio, iter_all)
+ page_len[j++] = bi->bv_len;
+
+ if (!status) {
for (j = vcnt; j-- ; ) {
- struct page *p, *s;
- p = pbio->bi_io_vec[j].bv_page;
- s = sbio->bi_io_vec[j].bv_page;
- if (memcmp(page_address(p),
- page_address(s),
- sbio->bi_io_vec[j].bv_len))
+ if (memcmp(page_address(ppages[j]),
+ page_address(spages[j]),
+ page_len[j]))
break;
}
} else
@@ -1882,37 +2322,15 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && !status)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
continue;
}
- /* fixup the bio for reuse */
- bio_reset(sbio);
- sbio->bi_vcnt = vcnt;
- sbio->bi_size = r1_bio->sectors << 9;
- sbio->bi_sector = r1_bio->sector +
- conf->mirrors[i].rdev->data_offset;
- sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
- sbio->bi_end_io = end_sync_read;
- sbio->bi_private = r1_bio;
-
- size = sbio->bi_size;
- for (j = 0; j < vcnt ; j++) {
- struct bio_vec *bi;
- bi = &sbio->bi_io_vec[j];
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }
bio_copy_data(sbio, pbio);
}
- return 0;
}
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
@@ -1920,18 +2338,27 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
struct r1conf *conf = mddev->private;
int i;
int disks = conf->raid_disks * 2;
- struct bio *bio, *wbio;
-
- bio = r1_bio->bios[r1_bio->read_disk];
+ struct bio *wbio;
- if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- /* ouch - failed to read all of that. */
- if (!fix_sync_read_error(r1_bio))
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ /*
+ * ouch - failed to read all of that.
+ * No need to fix read error for check/repair
+ * because all member disks are read.
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
+ !fix_sync_read_error(r1_bio)) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
return;
+ }
+ }
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- if (process_checks(r1_bio) < 0)
- return;
+ process_checks(r1_bio);
+
/*
* schedule writes
*/
@@ -1943,26 +2370,22 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
(i == r1_bio->read_disk ||
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
+ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+ abort_sync_write(mddev, r1_bio);
+ continue;
+ }
+
+ wbio->bi_opf = REQ_OP_WRITE;
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+ wbio->bi_opf |= MD_FAILFAST;
- wbio->bi_rw = WRITE;
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
- md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
- generic_make_request(wbio);
+ submit_bio_noacct(wbio);
}
- if (atomic_dec_and_test(&r1_bio->remaining)) {
- /* if we're here, all write(s) have completed, so clean up */
- int s = r1_bio->sectors;
- if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
- test_bit(R1BIO_WriteError, &r1_bio->state))
- reschedule_retry(r1_bio);
- else {
- put_buf(r1_bio);
- md_done_sync(mddev, s, 1);
- }
- }
+ put_sync_write_buf(r1_bio, 1);
}
/*
@@ -1973,45 +2396,48 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
* 3. Performs writes following reads for array synchronising.
*/
-static void fix_read_error(struct r1conf *conf, int read_disk,
- sector_t sect, int sectors)
+static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
+ sector_t sect = r1_bio->sector;
+ int sectors = r1_bio->sectors;
+ int read_disk = r1_bio->read_disk;
struct mddev *mddev = conf->mddev;
+ struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
+
+ if (exceed_read_errors(mddev, rdev)) {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ return;
+ }
+
while(sectors) {
int s = sectors;
int d = read_disk;
int success = 0;
int start;
- struct md_rdev *rdev;
if (s > (PAGE_SIZE>>9))
s = PAGE_SIZE >> 9;
do {
- /* Note: no rcu protection needed here
- * as this is synchronous in the raid1d thread
- * which is the thread that might remove
- * a device. If raid1d ever becomes multi-threaded....
- */
- sector_t first_bad;
- int bad_sectors;
-
rdev = conf->mirrors[d].rdev;
if (rdev &&
(test_bit(In_sync, &rdev->flags) ||
(!test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sect + s)) &&
- is_badblock(rdev, sect, s,
- &first_bad, &bad_sectors) == 0 &&
- sync_page_io(rdev, sect, s<<9,
- conf->tmppage, READ, false))
- success = 1;
- else {
- d++;
- if (d == conf->raid_disks * 2)
- d = 0;
+ rdev_has_badblock(rdev, sect, s) == 0) {
+ atomic_inc(&rdev->nr_pending);
+ if (sync_page_io(rdev, sect, s<<9,
+ conf->tmppage, REQ_OP_READ, false))
+ success = 1;
+ rdev_dec_pending(rdev, mddev);
+ if (success)
+ break;
}
- } while (!success && d != read_disk);
+
+ d++;
+ if (d == conf->raid_disks * 2)
+ d = 0;
+ } while (d != read_disk);
if (!success) {
/* Cannot read from anywhere - mark it bad */
@@ -2028,30 +2454,32 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags))
+ !test_bit(Faulty, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
r1_sync_page_io(rdev, sect, s,
- conf->tmppage, WRITE);
+ conf->tmppage, REQ_OP_WRITE);
+ rdev_dec_pending(rdev, mddev);
+ }
}
d = start;
while (d != read_disk) {
- char b[BDEVNAME_SIZE];
if (d==0)
d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
+ !test_bit(Faulty, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
if (r1_sync_page_io(rdev, sect, s,
- conf->tmppage, READ)) {
+ conf->tmppage, REQ_OP_READ)) {
atomic_add(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(sect +
- rdev->data_offset),
- bdevname(rdev->bdev, b));
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect +
+ rdev->data_offset),
+ rdev->bdev);
}
+ rdev_dec_pending(rdev, mddev);
}
}
sectors -= s;
@@ -2059,7 +2487,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
}
}
-static int narrow_write_error(struct r1bio *r1_bio, int i)
+static bool narrow_write_error(struct r1bio *r1_bio, int i)
{
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
@@ -2080,12 +2508,13 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
sector_t sector;
int sectors;
int sect_to_write = r1_bio->sectors;
- int ok = 1;
+ bool ok = true;
if (rdev->badblocks.shift < 0)
- return 0;
+ return false;
- block_sectors = 1 << rdev->badblocks.shift;
+ block_sectors = roundup(1 << rdev->badblocks.shift,
+ bdev_logical_block_size(rdev->bdev) >> 9);
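+ /*
+ * e.g. a 1-sector bad block granularity (shift == 0) on a drive with
+ * 4 KiB logical blocks gives block_sectors = roundup(1, 8) = 8, so the
+ * retried sub-writes below stay aligned to the device's logical block.
+ */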
sector = r1_bio->sector;
sectors = ((sector + block_sectors)
& ~(sector_t)(block_sectors - 1))
@@ -2098,30 +2527,22 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
/* Write at 'sector' for 'sectors'*/
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- unsigned vcnt = r1_bio->behind_page_count;
- struct bio_vec *vec = r1_bio->behind_bvecs;
-
- while (!vec->bv_page) {
- vec++;
- vcnt--;
- }
-
- wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
- memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
-
- wbio->bi_vcnt = vcnt;
+ wbio = bio_alloc_clone(rdev->bdev,
+ r1_bio->behind_master_bio,
+ GFP_NOIO, &mddev->bio_set);
} else {
- wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+ wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+ GFP_NOIO, &mddev->bio_set);
}
- wbio->bi_rw = WRITE;
- wbio->bi_sector = r1_bio->sector;
- wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_opf = REQ_OP_WRITE;
+ wbio->bi_iter.bi_sector = r1_bio->sector;
+ wbio->bi_iter.bi_size = r1_bio->sectors << 9;
+
+ bio_trim(wbio, sector - r1_bio->sector, sectors);
+ wbio->bi_iter.bi_sector += rdev->data_offset;
- md_trim_bio(wbio, sector - r1_bio->sector, sectors);
- wbio->bi_sector += rdev->data_offset;
- wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) == 0)
+ if (submit_bio_wait(wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2144,11 +2565,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
continue;
- if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (!bio->bi_status &&
test_bit(R1BIO_MadeGood, &r1_bio->state)) {
rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
}
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ if (bio->bi_status &&
test_bit(R1BIO_WriteError, &r1_bio->state)) {
if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
md_error(conf->mddev, rdev);
@@ -2160,7 +2581,9 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
- int m;
+ int m, idx;
+ bool fail = false;
+
for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -2173,28 +2596,39 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* narrow down and record precise write
* errors.
*/
- if (!narrow_write_error(r1_bio, m)) {
+ fail = true;
+ if (!narrow_write_error(r1_bio, m))
md_error(conf->mddev,
conf->mirrors[m].rdev);
/* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- }
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
- if (test_bit(R1BIO_WriteError, &r1_bio->state))
- close_write(r1_bio);
- raid_end_bio_io(r1_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ idx = sector_to_idx(r1_bio->sector);
+ atomic_inc(&conf->nr_queued[idx]);
+ spin_unlock_irq(&conf->device_lock);
+ /*
+ * In case freeze_array() is waiting for condition
+ * get_unqueued_pending() == extra to be true.
+ */
+ wake_up(&conf->wait_barrier);
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
- int disk;
- int max_sectors;
struct mddev *mddev = conf->mddev;
struct bio *bio;
- char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
+ sector_t sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2205,76 +2639,31 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* This is all done synchronously while the array is
* frozen
*/
- if (mddev->ro == 0) {
- freeze_array(conf, 1);
- fix_read_error(conf, r1_bio->read_disk,
- r1_bio->sector, r1_bio->sectors);
- unfreeze_array(conf);
- } else
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
bio = r1_bio->bios[r1_bio->read_disk];
- bdevname(bio->bi_bdev, b);
-read_more:
- disk = read_balance(conf, r1_bio, &max_sectors);
- if (disk == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev), b, (unsigned long long)r1_bio->sector);
- raid_end_bio_io(r1_bio);
- } else {
- const unsigned long do_sync
- = r1_bio->master_bio->bi_rw & REQ_SYNC;
- if (bio) {
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
- }
- r1_bio->read_disk = disk;
- bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
- r1_bio->bios[r1_bio->read_disk] = bio;
- rdev = conf->mirrors[disk].rdev;
- printk_ratelimited(KERN_ERR
- "md/raid1:%s: redirecting sector %llu"
- " to other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev, b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
- bio->bi_private = r1_bio;
- if (max_sectors < r1_bio->sectors) {
- /* Drat - have to split this up more */
- struct bio *mbio = r1_bio->master_bio;
- int sectors_handled = (r1_bio->sector + max_sectors
- - mbio->bi_sector);
- r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (mbio->bi_phys_segments == 0)
- mbio->bi_phys_segments = 2;
- else
- mbio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- generic_make_request(bio);
- bio = NULL;
+ bio_put(bio);
+ r1_bio->bios[r1_bio->read_disk] = NULL;
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ rdev = conf->mirrors[r1_bio->read_disk].rdev;
+ if (mddev->ro == 0
+ && !test_bit(FailFast, &rdev->flags)) {
+ freeze_array(conf, 1);
+ fix_read_error(conf, r1_bio);
+ unfreeze_array(conf);
+ } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
+ md_error(mddev, rdev);
+ } else {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ }
- r1_bio->master_bio = mbio;
- r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
- r1_bio->state = 0;
- set_bit(R1BIO_ReadError, &r1_bio->state);
- r1_bio->mddev = mddev;
- r1_bio->sector = mbio->bi_sector + sectors_handled;
+ rdev_dec_pending(rdev, conf->mddev);
+ sector = r1_bio->sector;
+ bio = r1_bio->master_bio;
- goto read_more;
- } else
- generic_make_request(bio);
- }
+ /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
+ r1_bio->state = 0;
+ raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
+ allow_barrier(conf, sector);
}
static void raid1d(struct md_thread *thread)
@@ -2285,9 +2674,29 @@ static void raid1d(struct md_thread *thread)
struct r1conf *conf = mddev->private;
struct list_head *head = &conf->retry_list;
struct blk_plug plug;
+ int idx;
md_check_recovery(mddev);
+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ list_splice_init(&conf->bio_end_io_list, &tmp);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r1_bio = list_first_entry(&tmp, struct r1bio,
+ retry_list);
+ list_del(&r1_bio->retry_list);
+ idx = sector_to_idx(r1_bio->sector);
+ atomic_dec(&conf->nr_queued[idx]);
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {
@@ -2300,7 +2709,8 @@ static void raid1d(struct md_thread *thread)
}
r1_bio = list_entry(head->prev, struct r1bio, retry_list);
list_del(head->prev);
- conf->nr_queued--;
+ idx = sector_to_idx(r1_bio->sector);
+ atomic_dec(&conf->nr_queued[idx]);
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r1_bio->mddev;
@@ -2317,31 +2727,41 @@ static void raid1d(struct md_thread *thread)
else if (test_bit(R1BIO_ReadError, &r1_bio->state))
handle_read_error(conf, r1_bio);
else
- /* just a partial read to be scheduled from separate
- * context
- */
- generic_make_request(r1_bio->bios[r1_bio->read_disk]);
+ WARN_ON_ONCE(1);
cond_resched();
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev);
}
blk_finish_plug(&plug);
}
-
static int init_resync(struct r1conf *conf)
{
int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
- BUG_ON(conf->r1buf_pool);
- conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
- conf->poolinfo);
- if (!conf->r1buf_pool)
- return -ENOMEM;
- conf->next_resync = 0;
- return 0;
+ BUG_ON(mempool_initialized(&conf->r1buf_pool));
+
+ return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
+ r1buf_pool_free, conf);
+}
+
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+ struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
+ struct resync_pages *rps;
+ struct bio *bio;
+ int i;
+
+ for (i = conf->raid_disks * 2; i--; ) {
+ bio = r1bio->bios[i];
+ rps = bio->bi_private;
+ bio_reset(bio, NULL, 0);
+ bio->bi_private = rps;
+ }
+ r1bio->master_bio = NULL;
+ return r1bio;
}
/*
@@ -2354,26 +2774,28 @@ static int init_resync(struct r1conf *conf)
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ sector_t max_sector, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
struct bio *bio;
- sector_t max_sector, nr_sectors;
+ sector_t nr_sectors;
int disk = -1;
int i;
int wonly = -1;
int write_targets = 0, read_targets = 0;
sector_t sync_blocks;
- int still_degraded = 0;
+ bool still_degraded = false;
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
+ int idx = sector_to_idx(sector_nr);
+ int page_idx = 0;
- if (!conf->r1buf_pool)
+ if (!mempool_initialized(&conf->r1buf_pool))
if (init_resync(conf))
return 0;
- max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
/* If we aborted, we need to abort the
* sync on the 'current' bitmap chunk (there will
@@ -2381,18 +2803,24 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 * We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
- &sync_blocks, 1);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- bitmap_close_sync(mddev->bitmap);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
+
+ if (mddev_is_clustered(mddev)) {
+ conf->cluster_sync_low = 0;
+ conf->cluster_sync_high = 0;
+ }
return 0;
}
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
@@ -2401,27 +2829,34 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 /* Before building a request, check if we can skip these blocks.
 * This call to bitmap_start_sync doesn't actually record anything
*/
- if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
return sync_blocks;
}
+
/*
- * If there is non-resync activity waiting for a turn,
- * and resync is going fast enough,
- * then let it though before starting on this new sync request.
+ * If there is non-resync activity waiting for a turn, then let it
+ * through before starting on this new sync request.
*/
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
+ if (atomic_read(&conf->nr_waiting[idx]))
+ schedule_timeout_uninterruptible(1);
- bitmap_cond_end_sync(mddev->bitmap, sector_nr);
- r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
- raise_barrier(conf);
+ /* we are incrementing sector_nr below. To be safe, we check against
+ * sector_nr + two times RESYNC_SECTORS
+ */
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
+
+ if (raise_barrier(conf, sector_nr))
+ return 0;
- conf->next_resync = sector_nr;
+ r1_bio = raid1_alloc_init_r1buf(conf);
- rcu_read_lock();
/*
* If we get a correctably read error during resync or recovery,
* we might want to read from a different device. So we
@@ -2435,25 +2870,26 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
r1_bio->sector = sector_nr;
r1_bio->state = 0;
set_bit(R1BIO_IsSync, &r1_bio->state);
+ /* make sure good_sectors won't go across barrier unit boundary */
+ good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
bio = r1_bio->bios[i];
- bio_reset(bio);
- rdev = rcu_dereference(conf->mirrors[i].rdev);
+ rdev = conf->mirrors[i].rdev;
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
- still_degraded = 1;
+ still_degraded = true;
} else if (!test_bit(In_sync, &rdev->flags)) {
- bio->bi_rw = WRITE;
+ bio->bi_opf = REQ_OP_WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
} else {
/* may need to read from here */
sector_t first_bad = MaxSector;
- int bad_sectors;
+ sector_t bad_sectors;
if (is_badblock(rdev, sector_nr, good_sectors,
&first_bad, &bad_sectors)) {
@@ -2474,7 +2910,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
if (disk < 0)
disk = i;
}
- bio->bi_rw = READ;
+ bio->bi_opf = REQ_OP_READ;
bio->bi_end_io = end_sync_read;
read_targets++;
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -2486,19 +2922,19 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
* if we are doing resync or repair. Otherwise, leave
* this device alone for this sync request.
*/
- bio->bi_rw = WRITE;
+ bio->bi_opf = REQ_OP_WRITE;
bio->bi_end_io = end_sync_write;
write_targets++;
}
}
- if (bio->bi_end_io) {
+ if (rdev && bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_private = r1_bio;
+ bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
+ bio_set_dev(bio, rdev->bdev);
+ if (test_bit(FailFast, &rdev->flags))
+ bio->bi_opf |= MD_FAILFAST;
}
}
- rcu_read_unlock();
if (disk < 0)
disk = wonly;
r1_bio->read_disk = disk;
@@ -2515,7 +2951,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
min_bad, 0
) && ok;
}
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1;
put_buf(r1_bio);
@@ -2569,44 +3005,47 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
- BUG_ON(sync_blocks < (PAGE_SIZE>>9));
if ((len >> 9) > sync_blocks)
len = sync_blocks<<9;
}
for (i = 0 ; i < conf->raid_disks * 2; i++) {
+ struct resync_pages *rp;
+
bio = r1_bio->bios[i];
+ rp = get_resync_pages(bio);
if (bio->bi_end_io) {
- page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
- if (bio_add_page(bio, page, len, 0) == 0) {
- /* stop here */
- bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
- while (i > 0) {
- i--;
- bio = r1_bio->bios[i];
- if (bio->bi_end_io==NULL)
- continue;
- /* remove last page from this bio */
- bio->bi_vcnt--;
- bio->bi_size -= len;
- bio->bi_flags &= ~(1<< BIO_SEG_VALID);
- }
- goto bio_full;
- }
+ page = resync_fetch_page(rp, page_idx);
+
+ /*
+ * won't fail because the vec table is big
+ * enough to hold all these pages
+ */
+ __bio_add_page(bio, page, len, 0);
}
}
nr_sectors += len>>9;
sector_nr += len>>9;
sync_blocks -= (len>>9);
- } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
- bio_full:
+ } while (++page_idx < RESYNC_PAGES);
+
r1_bio->sectors = nr_sectors;
+ if (mddev_is_clustered(mddev) &&
+ conf->cluster_sync_high < sector_nr + nr_sectors) {
+ conf->cluster_sync_low = mddev->curr_resync_completed;
+ conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
+ /* Send resync message */
+ mddev->cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+
/* For a user-requested sync, we read all readable devices and do a
* compare
*/
@@ -2616,16 +3055,17 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
- md_sync_acct(bio->bi_bdev, nr_sectors);
- generic_make_request(bio);
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
+ submit_bio_noacct(bio);
}
}
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct(bio->bi_bdev, nr_sectors);
- generic_make_request(bio);
-
+ if (read_targets == 1)
+ bio->bi_opf &= ~MD_FAILFAST;
+ submit_bio_noacct(bio);
}
return nr_sectors;
}
@@ -2644,15 +3084,36 @@ static struct r1conf *setup_conf(struct mddev *mddev)
int i;
struct raid1_info *disk;
struct md_rdev *rdev;
+ size_t r1bio_size;
int err = -ENOMEM;
conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
if (!conf)
goto abort;
- conf->mirrors = kzalloc(sizeof(struct raid1_info)
- * mddev->raid_disks * 2,
- GFP_KERNEL);
+ conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
+ sizeof(atomic_t), GFP_KERNEL);
+ if (!conf->nr_pending)
+ goto abort;
+
+ conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
+ sizeof(atomic_t), GFP_KERNEL);
+ if (!conf->nr_waiting)
+ goto abort;
+
+ conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
+ sizeof(atomic_t), GFP_KERNEL);
+ if (!conf->nr_queued)
+ goto abort;
+
+ conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
+ sizeof(atomic_t), GFP_KERNEL);
+ if (!conf->barrier)
+ goto abort;
+
+ conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+ mddev->raid_disks, 2),
+ GFP_KERNEL);
if (!conf->mirrors)
goto abort;
@@ -2660,50 +3121,36 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto abort;
- conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
- if (!conf->poolinfo)
- goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free,
- conf->poolinfo);
+ r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
+ conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
if (!conf->r1bio_pool)
goto abort;
- conf->poolinfo->mddev = mddev;
+ err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
+ if (err)
+ goto abort;
err = -EINVAL;
spin_lock_init(&conf->device_lock);
+ conf->raid_disks = mddev->raid_disks;
rdev_for_each(rdev, mddev) {
- struct request_queue *q;
int disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
- || disk_idx < 0)
+
+ if (disk_idx >= conf->raid_disks || disk_idx < 0)
continue;
- if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + mddev->raid_disks + disk_idx;
- else
- disk = conf->mirrors + disk_idx;
- if (disk->rdev)
+ if (!raid1_add_conf(conf, rdev, disk_idx,
+ test_bit(Replacement, &rdev->flags)))
goto abort;
- disk->rdev = rdev;
- q = bdev_get_queue(rdev->bdev);
- if (q->merge_bvec_fn)
- mddev->merge_check_needed = 1;
-
- disk->head_position = 0;
- disk->seq_start = MaxSector;
}
- conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
- conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
@@ -2736,51 +3183,65 @@ static struct r1conf *setup_conf(struct mddev *mddev)
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, "raid1");
- if (!conf->thread) {
- printk(KERN_ERR
- "md/raid1:%s: couldn't allocate thread\n",
- mdname(mddev));
+ rcu_assign_pointer(conf->thread,
+ md_register_thread(raid1d, mddev, "raid1"));
+ if (!conf->thread)
goto abort;
- }
return conf;
abort:
if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
+ kfree(conf->nr_pending);
+ kfree(conf->nr_waiting);
+ kfree(conf->nr_queued);
+ kfree(conf->barrier);
+ bioset_exit(&conf->bio_split);
kfree(conf);
}
return ERR_PTR(err);
}
-static int stop(struct mddev *mddev);
-static int run(struct mddev *mddev)
+static int raid1_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+ int err;
+
+ md_init_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
+ lim.logical_block_size = mddev->logical_block_size;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err)
+ return err;
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
+static int raid1_run(struct mddev *mddev)
{
struct r1conf *conf;
int i;
- struct md_rdev *rdev;
int ret;
- bool discard_supported = false;
if (mddev->level != 1) {
- printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
+ pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
- mdname(mddev));
+ pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+ mdname(mddev));
return -EIO;
}
+
/*
* copy the already verified devices into our private RAID1
* bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
+ * should be freed in raid1_free()]
*/
if (mddev->private == NULL)
conf = setup_conf(mddev);
@@ -2790,91 +3251,70 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (mddev->queue)
- blk_queue_max_write_same_sectors(mddev->queue, 0);
-
- rdev_for_each(rdev, mddev) {
- if (!mddev->gendisk)
- continue;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
+ if (!mddev_is_dm(mddev)) {
+ ret = raid1_set_limits(mddev);
+ if (ret) {
+ if (!mddev->private)
+ raid1_free(mddev, conf);
+ return ret;
+ }
}
mddev->degraded = 0;
- for (i=0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->raid_disks; i++)
if (conf->mirrors[i].rdev == NULL ||
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
mddev->degraded++;
+ /*
+ * RAID1 needs at least one active disk
+ */
+ if (conf->raid_disks - mddev->degraded < 1) {
+ md_unregister_thread(mddev, &conf->thread);
+ if (!mddev->private)
+ raid1_free(mddev, conf);
+ return -EINVAL;
+ }
if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
-
- if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "md/raid1:%s: not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
- printk(KERN_INFO
- "md/raid1:%s: active with %d out of %d mirrors\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
+ mddev->resync_offset = MaxSector;
+
+ if (mddev->resync_offset != MaxSector)
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
+ mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/*
* Ok, everything is just fine now
*/
- mddev->thread = conf->thread;
- conf->thread = NULL;
+ rcu_assign_pointer(mddev->thread, conf->thread);
+ rcu_assign_pointer(conf->thread, NULL);
mddev->private = conf;
+ set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- if (mddev->queue) {
- mddev->queue->backing_dev_info.congested_fn = raid1_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
- blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
-
- if (discard_supported)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- mddev->queue);
- else
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
- mddev->queue);
- }
-
- ret = md_integrity_register(mddev);
+ ret = md_integrity_register(mddev);
if (ret)
- stop(mddev);
+ md_unregister_thread(mddev, &mddev->thread);
return ret;
}
-static int stop(struct mddev *mddev)
+static void raid1_free(struct mddev *mddev, void *priv)
{
- struct r1conf *conf = mddev->private;
- struct bitmap *bitmap = mddev->bitmap;
+ struct r1conf *conf = priv;
- /* wait for behind writes to complete */
- if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
- mdname(mddev));
- /* need to kick something here to make sure I/O goes? */
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
-
- raise_barrier(conf);
- lower_barrier(conf);
-
- md_unregister_thread(&mddev->thread);
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
+ kfree(conf->nr_pending);
+ kfree(conf->nr_waiting);
+ kfree(conf->nr_queued);
+ kfree(conf->barrier);
+ bioset_exit(&conf->bio_split);
kfree(conf);
- mddev->private = NULL;
- return 0;
}
static int raid1_resize(struct mddev *mddev, sector_t sectors)
@@ -2887,20 +3327,22 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
+
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- if (mddev->bitmap) {
- int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
+
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
+
if (ret)
return ret;
}
+
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -2922,12 +3364,12 @@ static int raid1_reshape(struct mddev *mddev)
* devices have the higher raid_disk numbers.
*/
mempool_t *newpool, *oldpool;
- struct pool_info *newpoolinfo;
+ size_t new_r1bio_size;
struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
- int d, d2, err;
+ int d, d2;
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -2939,9 +3381,8 @@ static int raid1_reshape(struct mddev *mddev)
return -EINVAL;
}
- err = md_allow_write(mddev);
- if (err)
- return err;
+ if (!mddev_is_clustered(mddev))
+ md_allow_write(mddev);
raid_disks = mddev->raid_disks + mddev->delta_disks;
@@ -2954,22 +3395,15 @@ static int raid1_reshape(struct mddev *mddev)
return -EBUSY;
}
- newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
- if (!newpoolinfo)
- return -ENOMEM;
- newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks * 2;
-
- newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free, newpoolinfo);
+ new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
+ newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
if (!newpool) {
- kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
+ newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+ raid_disks, 2),
GFP_KERNEL);
if (!newmirrors) {
- kfree(newpoolinfo);
mempool_destroy(newpool);
return -ENOMEM;
}
@@ -2987,17 +3421,14 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev))
- printk(KERN_WARNING
- "md/raid1:%s: cannot register rd%d\n",
- mdname(mddev), rdev->raid_disk);
+ pr_warn("md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
}
kfree(conf->mirrors);
conf->mirrors = newmirrors;
- kfree(conf->poolinfo);
- conf->poolinfo = newpoolinfo;
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (raid_disks - conf->raid_disks);
@@ -3007,6 +3438,7 @@ static int raid1_reshape(struct mddev *mddev)
unfreeze_array(conf);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -3014,21 +3446,14 @@ static int raid1_reshape(struct mddev *mddev)
return 0;
}
-static void raid1_quiesce(struct mddev *mddev, int state)
+static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
struct r1conf *conf = mddev->private;
- switch(state) {
- case 2: /* wake for suspend */
- wake_up(&conf->wait_barrier);
- break;
- case 1:
- raise_barrier(conf);
- break;
- case 0:
- lower_barrier(conf);
- break;
- }
+ if (quiesce)
+ freeze_array(conf, 0);
+ else
+ unfreeze_array(conf);
}
static void *raid1_takeover(struct mddev *mddev)
@@ -3042,8 +3467,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
- conf->barrier = 1;
+ if (!IS_ERR(conf)) {
+ /* Array must appear to be quiesced */
+ conf->array_frozen = 1;
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
+ }
return conf;
}
return ERR_PTR(-EINVAL);
@@ -3051,18 +3480,22 @@ static void *raid1_takeover(struct mddev *mddev)
static struct md_personality raid1_personality =
{
- .name = "raid1",
- .level = 1,
- .owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
- .stop = stop,
- .status = status,
- .error_handler = error,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID1,
+ .name = "raid1",
+ .owner = THIS_MODULE,
+ },
+
+ .make_request = raid1_make_request,
+ .run = raid1_run,
+ .free = raid1_free,
+ .status = raid1_status,
+ .error_handler = raid1_error,
.hot_add_disk = raid1_add_disk,
.hot_remove_disk= raid1_remove_disk,
.spare_active = raid1_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid1_sync_request,
.resize = raid1_resize,
.size = raid1_size,
.check_reshape = raid1_reshape,
@@ -3070,22 +3503,20 @@ static struct md_personality raid1_personality =
.takeover = raid1_takeover,
};
-static int __init raid_init(void)
+static int __init raid1_init(void)
{
- return register_md_personality(&raid1_personality);
+ return register_md_submodule(&raid1_personality.head);
}
-static void raid_exit(void)
+static void __exit raid1_exit(void)
{
- unregister_md_personality(&raid1_personality);
+ unregister_md_submodule(&raid1_personality.head);
}
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid1_init);
+module_exit(raid1_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);