author		Mike Snitzer <snitzer@redhat.com>	2020-06-15 11:31:04 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2020-07-13 11:47:32 -0400
commit		374117ad4736c5a4f8012cfe59fc07d9d58191d5
tree		53d95d7b94a1717a75ecb57010106eeef66df390	/drivers/md/dm-mpath.c
parent		564dbb130b3f8ab39719a42c130505bf75610fbf
dm mpath: use double checked locking in fast path
Fast-path code is biased toward lazy acknowledgement of a bit being set (primarily only during initialization). Multipath code is very retry oriented, so even if state is missed it will recover.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
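For context, the double-checked pattern introduced by this patch can be sketched in user-space C as follows. This is only an illustration of the idea, not kernel code: the names mp_state and mp_double_check_test_bit are made up here, and a pthread mutex plus a C11 atomic stand in for the multipath spinlock (m->lock) and the atomic test_bit() on m->flags.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct mp_state {
	pthread_mutex_t lock;	/* stands in for the kernel's m->lock spinlock */
	atomic_ulong flags;	/* stands in for the m->flags bit mask */
};

static bool mp_double_check_test_bit(struct mp_state *s, int bit)
{
	/* Unlocked fast-path read: cheap, but may briefly see stale state
	 * (e.g. right after initialization). */
	bool r = (atomic_load(&s->flags) & (1UL << bit)) != 0;

	if (r) {
		/* Slow path: the bit looked set, so confirm it under the lock
		 * before the caller takes the expensive action. */
		pthread_mutex_lock(&s->lock);
		r = (atomic_load(&s->flags) & (1UL << bit)) != 0;
		pthread_mutex_unlock(&s->lock);
	}

	return r;
}

The point of the second, locked check is that the unlocked read is only trusted when it says "clear"; a "set" result is re-verified under the lock, so callers never pay the locking cost on the common fast path, and a missed transition is tolerated because the multipath code retries anyway.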
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--	drivers/md/dm-mpath.c	| 32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f71bb4e5eaf7..88dc7b41b1c6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -128,6 +128,20 @@ static void queue_if_no_path_timeout_work(struct timer_list *t);
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
+static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
+{
+ bool r = test_bit(MPATHF_bit, &m->flags);
+
+ if (r) {
+ unsigned long flags;
+ spin_lock_irqsave(&m->lock, flags);
+ r = test_bit(MPATHF_bit, &m->flags);
+ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+ return r;
+}
+
/*-----------------------------------------------
* Allocation routines
*-----------------------------------------------*/
@@ -499,7 +513,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
- if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+ if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
@@ -507,8 +521,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_DELAY_REQUEUE;
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
- } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
- test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
+ mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
pg_init_all_paths(m);
return DM_MAPIO_DELAY_REQUEUE;
}
@@ -598,7 +612,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
- if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+ if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if (!pgpath) {
@@ -609,8 +623,8 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
}
spin_unlock_irqrestore(&m->lock, flags);
- } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
- test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
+ mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
multipath_queue_bio(m, bio);
pg_init_all_paths(m);
return ERR_PTR(-EAGAIN);
@@ -861,7 +875,7 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
struct request_queue *q = bdev_get_queue(bdev);
int r;
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
+ if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
retain:
if (*attached_handler_name) {
/*
@@ -1967,11 +1981,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
int r;
pgpath = READ_ONCE(m->current_pgpath);
- if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+ if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, 0);
if (pgpath) {
- if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
+ if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
*bdev = pgpath->path.dev->bdev;
r = 0;
} else {