author     Mike Snitzer <snitzer@redhat.com>   2016-02-17 15:04:05 -0500
committer  Mike Snitzer <snitzer@redhat.com>   2016-02-22 22:34:42 -0500
commit     b0b477c7e0dd93f8916d106018ded1331b81bf61
tree       0b8329486138c196dc07855bbf43ad9130efbea1 /drivers/md
parent     90a4323ccfeaa2ef35a8f4c6443ee5106453d961
dm round robin: use percpu 'repeat_count' and 'current_path'
Now that dm-mpath core is lockless in the per-IO fast path it is critical, for performance, to have the .select_path hook (rr_select_path) also be as lockless as possible.

The new percpu members of 'struct selector' allow for lockless support of 'repeat_count' governed repeat use of a previously selected path.

If a path fails while it is the 'current_path', the worst case is that concurrent IO might be mapped to the failed path until the .fail_path hook (rr_fail_path) is called.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
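To illustrate the pattern outside the kernel, here is a minimal, hypothetical userspace analogue of what the patch does: each thread caches the last selected path together with a countdown, and only takes the shared lock to rotate the path list once the countdown runs out. The pthread mutex and __thread variables stand in for the driver's spinlock and percpu data; the names select_path, REPEAT_COUNT and the two-entry paths[] table are invented for this sketch and are not part of dm-round-robin.

/* Userspace sketch of the lockless "repeat_count" fast path (demo only). */
#include <pthread.h>
#include <stdio.h>

struct path { const char *name; };

static struct path paths[] = { { "sda" }, { "sdb" } };
static unsigned int next_idx;                    /* shared, lock-protected   */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

#define REPEAT_COUNT 1000                        /* mirrors RR_MIN_IO below  */

static __thread struct path *current_path;       /* per-thread ~ percpu ptr  */
static __thread unsigned int repeat_left;         /* per-thread ~ percpu ctr  */

static struct path *select_path(void)
{
	/* Fast path: reuse the cached selection without touching the lock. */
	if (current_path) {
		repeat_left--;
		if (repeat_left > 0)
			return current_path;
	}

	/* Slow path: rotate the shared index under the lock, re-arm cache. */
	pthread_mutex_lock(&lock);
	current_path = &paths[next_idx];
	next_idx = (next_idx + 1) % (sizeof(paths) / sizeof(paths[0]));
	pthread_mutex_unlock(&lock);
	repeat_left = REPEAT_COUNT;

	return current_path;
}

int main(void)
{
	for (int i = 0; i < 3000; i++) {
		struct path *p = select_path();
		if (i % 1000 == 0)
			printf("io %d -> %s\n", i, p->name);
	}
	return 0;
}

Built with cc -pthread, the demo prints sda, sdb, sda: each cached path serves REPEAT_COUNT selections before the lock is taken again, which is the behaviour the percpu 'repeat_count' and the restored RR_MIN_IO of 1000 aim for in the driver.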
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-round-robin.c   60
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 0558ab3d7235..4ace1da17db8 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
-#define RR_MIN_IO 1
+#define RR_MIN_IO 1000
#define RR_VERSION "1.1.0"
/*-----------------------------------------------------------------
@@ -47,19 +47,44 @@ struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
spinlock_t lock;
+ struct dm_path * __percpu *current_path;
+ struct percpu_counter repeat_count;
};
+static void set_percpu_current_path(struct selector *s, struct dm_path *path)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(s->current_path, cpu) = path;
+}
+
static struct selector *alloc_selector(void)
{
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- INIT_LIST_HEAD(&s->valid_paths);
- INIT_LIST_HEAD(&s->invalid_paths);
- spin_lock_init(&s->lock);
- }
+ if (!s)
+ return NULL;
+
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->invalid_paths);
+ spin_lock_init(&s->lock);
+
+ s->current_path = alloc_percpu(struct dm_path *);
+ if (!s->current_path)
+ goto out_current_path;
+ set_percpu_current_path(s, NULL);
+
+ if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
+ goto out_repeat_count;
return s;
+
+out_repeat_count:
+ free_percpu(s->current_path);
+out_current_path:
+ kfree(s);
+ return NULL;
}
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -80,6 +105,8 @@ static void rr_destroy(struct path_selector *ps)
free_paths(&s->valid_paths);
free_paths(&s->invalid_paths);
+ free_percpu(s->current_path);
+ percpu_counter_destroy(&s->repeat_count);
kfree(s);
ps->context = NULL;
}
@@ -130,11 +157,6 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
- if (repeat_count > 1) {
- DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
- repeat_count = 1;
- }
-
/* allocate the path */
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
if (!pi) {
@@ -161,6 +183,9 @@ static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
struct path_info *pi = p->pscontext;
spin_lock_irqsave(&s->lock, flags);
+ if (p == *this_cpu_ptr(s->current_path))
+ set_percpu_current_path(s, NULL);
+
list_move(&pi->list, &s->invalid_paths);
spin_unlock_irqrestore(&s->lock, flags);
}
@@ -183,15 +208,26 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
unsigned long flags;
struct selector *s = ps->context;
struct path_info *pi = NULL;
+ struct dm_path *current_path = NULL;
+
+ current_path = *this_cpu_ptr(s->current_path);
+ if (current_path) {
+ percpu_counter_dec(&s->repeat_count);
+ if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ return current_path;
+ }
spin_lock_irqsave(&s->lock, flags);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
+ percpu_counter_set(&s->repeat_count, pi->repeat_count);
+ set_percpu_current_path(s, pi->path);
+ current_path = pi->path;
}
spin_unlock_irqrestore(&s->lock, flags);
- return pi ? pi->path : NULL;
+ return current_path;
}
static struct path_selector_type rr_ps = {