author    Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 11:02:48 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 11:02:48 -0800
commit    582cd91f69de8e44857cb610ebca661dac8656b7 (patch)
tree      0d680db02a5c236ee87b408b3f13ce33ebaca907 /drivers/md
parent    bd018bbaa58640da786d4289563e71c5ef3938c7 (diff)
parent    f885056a48ccf4ad4332def91e973f3993fa8695 (diff)
Merge tag 'for-5.12/block-2021-02-17' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
 "Another nice round of removing more code than what is added, mostly
  due to Christoph's relentless pursuit of tech debt removal/cleanups.

  This pull request contains:

   - Two series of BFQ improvements (Paolo, Jan, Jia)
   - Block iov_iter improvements (Pavel)
   - bsg error path fix (Pan)
   - blk-mq scheduler improvements (Jan)
   - -EBUSY discard fix (Jan)
   - bvec allocation improvements (Ming, Christoph)
   - bio allocation and init improvements (Christoph)
   - Store bdev pointer in bio instead of gendisk + partno (Christoph)
   - Block trace point cleanups (Christoph)
   - hard read-only vs read-only split (Christoph)
   - Block based swap cleanups (Christoph)
   - Zoned write granularity support (Damien)
   - Various fixes/tweaks (Chunguang, Guoqing, Lei, Lukas, Huhai)"

* tag 'for-5.12/block-2021-02-17' of git://git.kernel.dk/linux-block: (104 commits)
  mm: simplify swapdev_block
  sd_zbc: clear zone resources for non-zoned case
  block: introduce blk_queue_clear_zone_settings()
  zonefs: use zone write granularity as block size
  block: introduce zone_write_granularity limit
  block: use blk_queue_set_zoned in add_partition()
  nullb: use blk_queue_set_zoned() to setup zoned devices
  nvme: cleanup zone information initialization
  block: document zone_append_max_bytes attribute
  block: use bi_max_vecs to find the bvec pool
  md/raid10: remove dead code in reshape_request
  block: mark the bio as cloned in bio_iov_bvec_set
  block: set BIO_NO_PAGE_REF in bio_iov_bvec_set
  block: remove a layer of indentation in bio_iov_iter_get_pages
  block: turn the nr_iovecs argument to bio_alloc* into an unsigned short
  block: remove the 1 and 4 vec bvec_slabs entries
  block: streamline bvec_alloc
  block: factor out a bvec_alloc_gfp helper
  block: move struct biovec_slab to bio.c
  block: reuse BIO_INLINE_VECS for integrity bvecs
  ...
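[Editor's note: the headline change of this pull for drivers/md is the
bio device representation switch. A minimal sketch of the conversion
pattern applied throughout this diff, assuming the 5.12 definitions of
struct bio and bio_set_dev(); the helper names below are illustrative,
not from the tree:]

/*
 * Before: a bio named its device as gendisk + partition number:
 *     struct gendisk *bi_disk;
 *     u8              bi_partno;
 * After this series: a single block_device pointer:
 *     struct block_device *bi_bdev;
 */
static struct request_queue *bio_queue(struct bio *bio)
{
	/* was: bio->bi_disk->queue */
	return bio->bi_bdev->bd_disk->queue;
}

static void retarget(struct bio *bio, struct block_device *bdev)
{
	/* bio_set_dev() assigns bi_bdev and reassociates the blkg,
	 * which is why the dm.c hunk below can drop its explicit
	 * bio_associate_blkg() call */
	bio_set_dev(bio, bdev);
}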
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/debug.c         2
-rw-r--r--  drivers/md/bcache/request.c      39
-rw-r--r--  drivers/md/bcache/super.c         2
-rw-r--r--  drivers/md/dm-bio-record.h        9
-rw-r--r--  drivers/md/dm-cache-metadata.c    2
-rw-r--r--  drivers/md/dm-clone-target.c     14
-rw-r--r--  drivers/md/dm-raid1.c            10
-rw-r--r--  drivers/md/dm-thin-metadata.c     2
-rw-r--r--  drivers/md/dm-zoned-metadata.c    6
-rw-r--r--  drivers/md/dm.c                  14
-rw-r--r--  drivers/md/md-linear.c            2
-rw-r--r--  drivers/md/md.c                  73
-rw-r--r--  drivers/md/md.h                   8
-rw-r--r--  drivers/md/raid1.c                8
-rw-r--r--  drivers/md/raid10.c              18
-rw-r--r--  drivers/md/raid5-ppl.c            2
-rw-r--r--  drivers/md/raid5.c              110
17 files changed, 138 insertions, 183 deletions
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index b00fd08d696b..63e809f38e3f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -114,7 +114,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
- check->bi_disk = bio->bi_disk;
+ bio_set_dev(check, bio->bi_bdev);
check->bi_opf = REQ_OP_READ;
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 85b1f2a9b72d..29c231758293 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,7 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
- struct block_device *part;
+ struct block_device *orig_bdev;
unsigned long start_time;
struct btree_op op;
@@ -670,8 +670,8 @@ static void bio_complete(struct search *s)
{
if (s->orig_bio) {
/* Count on bcache device */
- part_end_io_acct(s->part, s->orig_bio, s->start_time);
-
+ bio_end_io_acct_remapped(s->orig_bio, s->start_time,
+ s->orig_bdev);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -714,7 +714,8 @@ static void search_free(struct closure *cl)
}
static inline struct search *search_alloc(struct bio *bio,
- struct bcache_device *d)
+ struct bcache_device *d, struct block_device *orig_bdev,
+ unsigned long start_time)
{
struct search *s;
@@ -732,7 +733,8 @@ static inline struct search *search_alloc(struct bio *bio,
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
/* Count on the bcache device */
- s->start_time = part_start_io_acct(d->disk, &s->part, bio);
+ s->orig_bdev = orig_bdev;
+ s->start_time = start_time;
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -894,7 +896,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_disk) - bio_end_sector(bio));
+ get_capacity(bio->bi_bdev->bd_disk) -
+ bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@@ -1073,7 +1076,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
- struct block_device *part;
+ struct block_device *orig_bdev;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1085,7 +1088,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_private = ddip->bi_private;
/* Count on the bcache device */
- part_end_io_acct(ddip->part, bio, ddip->start_time);
+ bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1098,7 +1101,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io(bio);
}
-static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
+static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
+ struct block_device *orig_bdev, unsigned long start_time)
{
struct detached_dev_io_private *ddip;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
@@ -1111,7 +1115,8 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
/* Count on the bcache device */
- ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
+ ddip->orig_bdev = orig_bdev;
+ ddip->start_time = start_time;
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1167,8 +1172,10 @@ static void quit_max_writeback_rate(struct cache_set *c,
blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct block_device *orig_bdev = bio->bi_bdev;
+ struct bcache_device *d = orig_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ unsigned long start_time;
int rw = bio_data_dir(bio);
if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
@@ -1193,11 +1200,13 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
}
+ start_time = bio_start_io_acct(bio);
+
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, orig_bdev, start_time);
trace_bcache_request_start(s->d, bio);
if (!bio->bi_iter.bi_size) {
@@ -1218,7 +1227,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
} else
/* I/O request sent to backing device */
- detached_dev_do_request(d, bio);
+ detached_dev_do_request(d, bio, orig_bdev, start_time);
return BLK_QC_T_NONE;
}
@@ -1274,7 +1283,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
@@ -1282,7 +1291,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
return BLK_QC_T_NONE;
}
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
cl = &s->cl;
bio = &s->bio.bio;
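[Editor's note: the bcache changes above replace part_start_io_acct()/
part_end_io_acct() with the remapped-accounting helpers added in this
series. A hedged sketch of the pattern for a stacking driver; the
function names here are illustrative, not from the tree:]

static void stacked_submit(struct bio *bio, struct block_device *lower,
			   struct block_device **orig_bdev,
			   unsigned long *start_time)
{
	/* sample the start time while bi_bdev still names the
	 * upper (bcache) device the I/O arrived on */
	*orig_bdev = bio->bi_bdev;
	*start_time = bio_start_io_acct(bio);

	bio_set_dev(bio, lower);	/* remap to the backing device */
	submit_bio_noacct(bio);
}

static void stacked_complete(struct bio *bio, unsigned long start_time,
			     struct block_device *orig_bdev)
{
	/* account the completion against the saved original device */
	bio_end_io_acct_remapped(bio, start_time, orig_bdev);
}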
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2047a9cccdb5..193fe7652329 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1939,7 +1939,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
goto err;
if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ BIOSET_NEED_RESCUER))
goto err;
c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
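[Editor's note: with the bvec allocation rework in this pull, a bioset
whose bios are only ever cloned or split, and so share the parent's
bvec table, presumably no longer needs its own bvec mempool. Sketch,
assuming the 5.12 bioset_init() signature:]

	/* was: BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER */
	ret = bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
			  BIOSET_NEED_RESCUER);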
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 2ea0360108e1..a3b71350eec8 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -18,8 +18,7 @@
*/
struct dm_bio_details {
- struct gendisk *bi_disk;
- u8 bi_partno;
+ struct block_device *bi_bdev;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
@@ -31,8 +30,7 @@ struct dm_bio_details {
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- bd->bi_disk = bio->bi_disk;
- bd->bi_partno = bio->bi_partno;
+ bd->bi_bdev = bio->bi_bdev;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
@@ -44,8 +42,7 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- bio->bi_disk = bd->bi_disk;
- bio->bi_partno = bd->bi_partno;
+ bio->bi_bdev = bd->bi_bdev;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index af6d4f898e4c..89a73204dbf4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -449,7 +449,7 @@ static int __check_incompat_features(struct cache_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(cmd->bdev->bd_disk))
+ if (bdev_read_only(cmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
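[Editor's note: the get_disk_ro() -> bdev_read_only() conversions here
and in dm-thin-metadata.c below belong to the hard read-only vs
read-only split in this pull. As of 5.12 the helper checks both the
per-bdev read-only policy and the whole-disk flag; minimal sketch:]

static bool metadata_is_ro(struct block_device *bdev)
{
	/* was: get_disk_ro(bdev->bd_disk) -- whole-disk flag only */
	return bdev_read_only(bdev);
}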
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index bdb255edc200..a90bdf9b2ca6 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -85,12 +85,6 @@ struct clone {
struct dm_clone_metadata *cmd;
- /*
- * bio used to flush the destination device, before committing the
- * metadata.
- */
- struct bio flush_bio;
-
/* Region hydration hash table */
struct hash_table_bucket *ht;
@@ -1155,11 +1149,7 @@ static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
goto out;
}
- bio_reset(&clone->flush_bio);
- bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
- clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- r = submit_bio_wait(&clone->flush_bio);
+ r = blkdev_issue_flush(clone->dest_dev->bdev);
if (unlikely(r)) {
__metadata_operation_failed(clone, "flush destination device", r);
goto out;
@@ -1886,7 +1876,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&clone->deferred_flush_completions);
clone->hydration_offset = 0;
atomic_set(&clone->hydrations_in_flight, 0);
- bio_init(&clone->flush_bio, NULL, 0);
clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
if (!clone->wq) {
@@ -1958,7 +1947,6 @@ static void clone_dtr(struct dm_target *ti)
struct clone *clone = ti->private;
mutex_destroy(&clone->commit_lock);
- bio_uninit(&clone->flush_bio);
for (i = 0; i < clone->nr_ctr_args; i++)
kfree(clone->ctr_args[i]);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa09bc4e4c54..b0a82f29a2e4 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_disk == NULL, details were not saved */
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1190,7 +1190,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@@ -1257,7 +1257,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
- if (!bio_record->details.bi_disk) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1282,7 +1282,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@@ -1292,7 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
return DM_ENDIO_DONE;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ebb2127f3e2..e75b20480e46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -636,7 +636,7 @@ static int __check_incompat_features(struct thin_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(pmd->bdev->bd_disk))
+ if (bdev_read_only(pmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index b298fefb022e..039d17b28938 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -819,7 +819,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -862,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -933,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
goto err;
}
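[Editor's note: blkdev_issue_flush() loses its gfp_t argument in this
series; as of 5.12 it issues the preflush from an on-stack bio, so
callers no longer pass an allocation context. Sketch:]

static int flush_cache(struct block_device *bdev)
{
	/* was: blkdev_issue_flush(bdev, GFP_NOIO) */
	return blkdev_issue_flush(bdev);
}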
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7bac564f3faa..479ec5bea09e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -977,16 +977,17 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
- !bio->bi_disk->queue->limits.max_discard_sectors)
+ !q->limits.max_discard_sectors)
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !q->limits.max_write_same_sectors)
disable_write_same(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !q->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -996,7 +997,7 @@ static void clone_endio(struct bio *bio)
*/
if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
sector_t written_sector = bio->bi_iter.bi_sector;
- struct request_queue *q = orig_bio->bi_disk->queue;
+ struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
orig_bio->bi_iter.bi_sector += written_sector & mask;
@@ -1422,8 +1423,7 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- flush_bio.bi_disk = ci->io->md->disk;
- bio_associate_blkg(&flush_bio);
+ bio_set_dev(&flush_bio, ci->io->md->disk->part0);
ci->bio = &flush_bio;
ci->sector_count = 0;
@@ -1626,7 +1626,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static blk_qc_t dm_submit_bio(struct bio *bio)
{
- struct mapped_device *md = bio->bi_disk->private_data;
+ struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 68cac7d19278..63ed8329a98d 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -252,7 +252,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue))) {
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 04384452a7ab..21da0c48f6c2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -340,24 +340,6 @@ static int start_readonly;
*/
static bool create_on_open = true;
-struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->bio_set))
- return bio_alloc(gfp_mask, nr_iovecs);
-
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
-}
-EXPORT_SYMBOL_GPL(bio_alloc_mddev);
-
-static struct bio *md_bio_alloc_sync(struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->sync_set))
- return bio_alloc(GFP_NOIO, 1);
-
- return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
-}
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -463,8 +445,8 @@ struct md_io {
struct mddev *mddev;
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
+ struct block_device *orig_bi_bdev;
unsigned long start_time;
- struct block_device *part;
};
static void md_end_io(struct bio *bio)
@@ -472,7 +454,7 @@ static void md_end_io(struct bio *bio)
struct md_io *md_io = bio->bi_private;
struct mddev *mddev = md_io->mddev;
- part_end_io_acct(md_io->part, bio, md_io->start_time);
+ bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
bio->bi_end_io = md_io->orig_bi_end_io;
bio->bi_private = md_io->orig_bi_private;
@@ -486,7 +468,7 @@ static void md_end_io(struct bio *bio)
static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
- struct mddev *mddev = bio->bi_disk->private_data;
+ struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
@@ -514,12 +496,12 @@ static blk_qc_t md_submit_bio(struct bio *bio)
md_io->mddev = mddev;
md_io->orig_bi_end_io = bio->bi_end_io;
md_io->orig_bi_private = bio->bi_private;
+ md_io->orig_bi_bdev = bio->bi_bdev;
bio->bi_end_io = md_end_io;
bio->bi_private = md_io;
- md_io->start_time = part_start_io_acct(mddev->gendisk,
- &md_io->part, bio);
+ md_io->start_time = bio_start_io_acct(bio);
}
/* bio could be mergeable after passing to underlayer */
@@ -613,7 +595,7 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
+ bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bio_set_dev(bi, rdev->bdev);
@@ -999,7 +981,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = md_bio_alloc_sync(mddev);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
@@ -1031,29 +1013,29 @@ int md_super_wait(struct mddev *mddev)
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, bool metadata_op)
{
- struct bio *bio = md_bio_alloc_sync(rdev->mddev);
- int ret;
+ struct bio bio;
+ struct bio_vec bvec;
+
+ bio_init(&bio, &bvec, 1);
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(bio, rdev->meta_bdev);
+ bio_set_dev(&bio, rdev->meta_bdev);
else
- bio_set_dev(bio, rdev->bdev);
- bio_set_op_attrs(bio, op, op_flags);
+ bio_set_dev(&bio, rdev->bdev);
+ bio.bi_opf = op | op_flags;
if (metadata_op)
- bio->bi_iter.bi_sector = sector + rdev->sb_start;
+ bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
+ bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio_add_page(bio, page, size, 0);
+ bio.bi_iter.bi_sector = sector + rdev->data_offset;
+ bio_add_page(&bio, page, size, 0);
- submit_bio_wait(bio);
+ submit_bio_wait(&bio);
- ret = !bio->bi_status;
- bio_put(bio);
- return ret;
+ return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);
@@ -2417,6 +2399,12 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_add_rdev);
+static bool rdev_read_only(struct md_rdev *rdev)
+{
+ return bdev_read_only(rdev->bdev) ||
+ (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
+}
+
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
@@ -2426,8 +2414,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
- if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
- mddev->pers)
+ if (rdev_read_only(rdev) && mddev->pers)
return -EROFS;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
@@ -5861,9 +5848,7 @@ int md_run(struct mddev *mddev)
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
- if (mddev->ro != 1 &&
- (bdev_read_only(rdev->bdev) ||
- bdev_read_only(rdev->meta_bdev))) {
+ if (mddev->ro != 1 && rdev_read_only(rdev)) {
mddev->ro = 1;
if (mddev->gendisk)
set_disk_ro(mddev->gendisk, 1);
@@ -6158,7 +6143,7 @@ static int restart_array(struct mddev *mddev)
if (test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
has_journal = true;
- if (bdev_read_only(rdev->bdev))
+ if (rdev_read_only(rdev))
has_readonly = true;
}
rcu_read_unlock();
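[Editor's note: the sync_page_io() rewrite above is an instance of the
on-stack bio pattern the bvec rework enables: bio_init() with a
caller-provided single-entry bvec table, submit_bio_wait(), and no
bio_put(), since nothing is heap-allocated. Hedged sketch of a
synchronous single-page read built the same way; the function name is
illustrative:]

static int read_page_sync(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);	/* embedded one-entry bvec table */
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = sector;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	submit_bio_wait(&bio);		/* fully synchronous */
	return !bio.bi_status;
}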
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 34070ab30a8a..bcbba1b5ec4a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -556,7 +556,7 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
@@ -742,8 +742,6 @@ extern void md_rdev_clear(struct md_rdev *rdev);
extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
-extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
@@ -793,14 +791,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c0347997f6ff..d2378765dc15 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -794,13 +794,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void *)bio->bi_disk;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1104,7 +1104,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+ behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1520,7 +1520,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
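[Editor's note: the (void *) casts above are a deliberate raid1/raid10
trick, unchanged in spirit by this series: while a write bio sits on a
plug list, the driver stashes its struct md_rdev pointer in the bio's
device field (previously bi_disk, now bi_bdev); the flush path reads
it back and immediately makes the field valid again, as in the hunks
themselves:]

	struct md_rdev *rdev = (void *)bio->bi_bdev;	/* stashed pointer, not a bdev */

	bio->bi_next = NULL;
	bio_set_dev(bio, rdev->bdev);	/* bi_bdev now a real block_device again */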
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c5d88ef6a45c..a9ae7d113492 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -882,13 +882,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1075,13 +1075,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1253,7 +1253,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)rdev;
+ mbio->bi_bdev = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@@ -3003,7 +3003,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
- * have bi_end_io, bi_sector, bi_disk set,
+ * have bi_end_io, bi_sector, bi_bdev set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.
@@ -4531,7 +4531,7 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
+ read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
@@ -4539,10 +4539,6 @@ read_more:
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
- read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
- read_bio->bi_status = 0;
- read_bio->bi_vcnt = 0;
- read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index d0f540296fe9..e8c118e05dfd 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(rdev->bdev);
out:
__free_page(page);
return ret;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3a90cc0e43ca..a348b2adf2a9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5310,7 +5310,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- WARN_ON_ONCE(bio->bi_partno);
+ WARN_ON_ONCE(bio->bi_bdev->bd_partno);
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
@@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi)
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
struct r5conf *conf = mddev->private;
- int dd_idx;
- struct bio* align_bi;
+ struct bio *align_bio;
struct md_rdev *rdev;
- sector_t end_sector;
+ sector_t sector, end_sector, first_bad;
+ int bad_sectors, dd_idx;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("%s: non aligned\n", __func__);
return 0;
}
- /*
- * use bio_clone_fast to make a copy of the bio
- */
- align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
- if (!align_bi)
- return 0;
- /*
- * set bi_end_io to a new function, and set bi_private to the
- * original bio.
- */
- align_bi->bi_end_io = raid5_align_endio;
- align_bi->bi_private = raid_bio;
- /*
- * compute position
- */
- align_bi->bi_iter.bi_sector =
- raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
- 0, &dd_idx, NULL);
- end_sector = bio_end_sector(align_bi);
+ sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+ &dd_idx, NULL);
+ end_sector = bio_end_sector(raid_bio);
+
rcu_read_lock();
+ if (r5c_big_stripe_cached(conf, sector))
+ goto out_rcu_unlock;
+
rdev = rcu_dereference(conf->disks[dd_idx].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset < end_sector) {
rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev &&
- (test_bit(Faulty, &rdev->flags) ||
+ if (!rdev)
+ goto out_rcu_unlock;
+ if (test_bit(Faulty, &rdev->flags) ||
!(test_bit(In_sync, &rdev->flags) ||
- rdev->recovery_offset >= end_sector)))
- rdev = NULL;
+ rdev->recovery_offset >= end_sector))
+ goto out_rcu_unlock;
}
- if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
- rcu_read_unlock();
- bio_put(align_bi);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+
+ align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+ bio_set_dev(align_bio, rdev->bdev);
+ align_bio->bi_end_io = raid5_align_endio;
+ align_bio->bi_private = raid_bio;
+ align_bio->bi_iter.bi_sector = sector;
+
+ raid_bio->bi_next = (void *)rdev;
+
+ if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+ &bad_sectors)) {
+ bio_put(align_bio);
+ rdev_dec_pending(rdev, mddev);
return 0;
}
- if (rdev) {
- sector_t first_bad;
- int bad_sectors;
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- raid_bio->bi_next = (void*)rdev;
- bio_set_dev(align_bi, rdev->bdev);
-
- if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
- bio_sectors(align_bi),
- &first_bad, &bad_sectors)) {
- bio_put(align_bi);
- rdev_dec_pending(rdev, mddev);
- return 0;
- }
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bio->bi_iter.bi_sector += rdev->data_offset;
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_iter.bi_sector += rdev->data_offset;
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+ conf->device_lock);
+ atomic_inc(&conf->active_aligned_reads);
+ spin_unlock_irq(&conf->device_lock);
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0,
- conf->device_lock);
- atomic_inc(&conf->active_aligned_reads);
- spin_unlock_irq(&conf->device_lock);
+ if (mddev->gendisk)
+ trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+ raid_bio->bi_iter.bi_sector);
+ submit_bio_noacct(align_bio);
+ return 1;
- if (mddev->gendisk)
- trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_iter.bi_sector);
- submit_bio_noacct(align_bi);
- return 1;
- } else {
- rcu_read_unlock();
- bio_put(align_bi);
- return 0;
- }
+out_rcu_unlock:
+ rcu_read_unlock();
+ return 0;
}
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)