author    Guoqing Jiang <guoqing.jiang@cloud.ionos.com>  2019-12-23 10:48:58 +0100
committer Song Liu <songliubraving@fb.com>  2020-01-13 11:44:09 -0800
commit    69df9cfc70421fb7949e8f7a19bfc36600b5522b (patch)
tree      f48b25606a762fd53bd7a0fa2c626831ea2470e9 /drivers/md
parent    de31ee949739aba9ce7dbb8b10e72c6fce0e76c7 (diff)
raid1: serialize the overlap write
Before dispatching a write bio, a raid1 array with serialize_policy enabled needs to check whether the bio overlaps with any previously in-flight bios. If an overlap exists, it has to wait until the collision has disappeared.

Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
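For readers outside the md code, the overlap test the patch relies on can be sketched in plain C. This is an illustrative model only, not the kernel implementation: struct serial_range and try_add_serial() below are hypothetical stand-ins for the per-rdev tracking behind check_and_add_serial(). A write covers the half-open sector range [lo, hi), exactly as the patch computes lo = r1_bio->sector and hi = lo + r1_bio->sectors.

	typedef unsigned long long sector_t;

	struct serial_range {
		sector_t lo, hi;		/* [lo, hi) in sectors */
		struct serial_range *next;
	};

	/* Link r into the in-flight list iff it overlaps nothing; else fail. */
	static int try_add_serial(struct serial_range **head, struct serial_range *r)
	{
		struct serial_range *cur;

		for (cur = *head; cur; cur = cur->next) {
			/* Half-open ranges overlap iff each starts before the other ends. */
			if (r->lo < cur->hi && cur->lo < r->hi)
				return -1;	/* collision: caller must wait and retry */
		}
		r->next = *head;
		*head = r;
		return 0;
	}

A return of -1 models the non-zero result that makes the patch's wait_event() in raid1_write_request() sleep until a completing write releases the conflicting range.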
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid1.c | 27
 1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0439f674ab14..3ad2f5a59d08 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -430,6 +430,8 @@ static void raid1_end_write_request(struct bio *bio)
 	int mirror = find_bio_disk(r1_bio, bio);
 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 	bool discard_error;
+	sector_t lo = r1_bio->sector;
+	sector_t hi = r1_bio->sector + r1_bio->sectors;
 
 	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
@@ -499,12 +501,8 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
-		if (test_bit(CollisionCheck, &rdev->flags)) {
-			sector_t lo = r1_bio->sector;
-			sector_t hi = r1_bio->sector + r1_bio->sectors;
-
+		if (test_bit(CollisionCheck, &rdev->flags))
 			remove_serial(rdev, lo, hi);
-		}
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -527,7 +525,8 @@ static void raid1_end_write_request(struct bio *bio)
 				call_bio_endio(r1_bio);
 			}
 		}
-	}
+	} else if (rdev->mddev->serialize_policy)
+		remove_serial(rdev, lo, hi);
 
 	if (r1_bio->bios[mirror] == NULL)
 		rdev_dec_pending(rdev, conf->mddev);
 
@@ -1337,6 +1336,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
 	int max_sectors;
+	sector_t lo, hi;
 
 	if (mddev_is_clustered(mddev) &&
 	    md_cluster_ops->area_resyncing(mddev, WRITE,
 
@@ -1364,6 +1364,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	r1_bio = alloc_r1bio(mddev, bio);
 	r1_bio->sectors = max_write_sectors;
+	lo = r1_bio->sector;
+	hi = r1_bio->sector + r1_bio->sectors;
 
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
 
@@ -1479,6 +1481,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	for (i = 0; i < disks; i++) {
 		struct bio *mbio = NULL;
+		struct md_rdev *rdev = conf->mirrors[i].rdev;
 
 		if (!r1_bio->bios[i])
 			continue;
 
@@ -1506,19 +1509,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
 
 		if (r1_bio->behind_master_bio) {
-			struct md_rdev *rdev = conf->mirrors[i].rdev;
-
-			if (test_bit(CollisionCheck, &rdev->flags)) {
-				sector_t lo = r1_bio->sector;
-				sector_t hi = r1_bio->sector + r1_bio->sectors;
-
+			if (test_bit(CollisionCheck, &rdev->flags))
 				wait_event(rdev->serial_io_wait,
 					   check_and_add_serial(rdev, lo, hi)
 					   == 0);
-			}
 			if (test_bit(WriteMostly, &rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
-		}
+		} else if (mddev->serialize_policy)
+			wait_event(rdev->serial_io_wait,
+				   check_and_add_serial(rdev, lo, hi) == 0);
 
 		r1_bio->bios[i] = mbio;
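The patch pairs the wait in raid1_write_request() with the release in raid1_end_write_request(). A hedged user-space sketch of that pairing, with pthread primitives standing in for the kernel's wait_event()/wake_up() and reusing the hypothetical serial_range/try_add_serial() from the sketch above; this is illustrative only, not md code.

	#include <pthread.h>

	static pthread_mutex_t serial_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t serial_waitq = PTHREAD_COND_INITIALIZER;
	static struct serial_range *inflight;

	/* Submit side: mirrors wait_event(..., check_and_add_serial() == 0). */
	static void serial_write_begin(struct serial_range *r)
	{
		pthread_mutex_lock(&serial_lock);
		while (try_add_serial(&inflight, r) != 0)
			pthread_cond_wait(&serial_waitq, &serial_lock);
		pthread_mutex_unlock(&serial_lock);
	}

	/* Completion side: mirrors remove_serial(), waking any blocked writer. */
	static void serial_write_end(struct serial_range *r)
	{
		struct serial_range **p;

		pthread_mutex_lock(&serial_lock);
		for (p = &inflight; *p; p = &(*p)->next) {
			if (*p == r) {
				*p = r->next;
				break;
			}
		}
		pthread_cond_broadcast(&serial_waitq);
		pthread_mutex_unlock(&serial_lock);
	}

The design point the patch makes is visible here: write-behind bios already took this begin/end path via the CollisionCheck flag, and the new else-branches extend the same serialization to ordinary writes whenever serialize_policy is enabled, which is why lo and hi are hoisted to the top of both functions.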