author		Ming Lei <ming.lei@redhat.com>	2021-01-22 10:33:08 +0800
committer	Martin K. Petersen <martin.petersen@oracle.com>	2021-03-04 17:36:59 -0500
commit		c548e62bcf6adc7066ff201e9ecc88e536dd8890 (patch)
tree		526fdd3f62253edc5c9512207fa76655991d7c5c /lib/sbitmap.c
parent		bf2c4282a10a92810ba83e85677a5273d6ca0df5 (diff)
scsi: sbitmap: Move allocation hint into sbitmap
Allocation hint should have belonged to sbitmap. Also, when an sbitmap's depth is high and there is no need to use multiple wakeup queues, users can benefit from the percpu allocation hint too.

Move the allocation hint into sbitmap, so that the SCSI device queue can benefit from the allocation hint when converting to a plain sbitmap.

Convert vhost/scsi.c to use sbitmap allocation with a percpu alloc hint. This is more efficient than the previous approach.

Link: https://lore.kernel.org/r/20210122023317.687987-5-ming.lei@redhat.com
Cc: Omar Sandoval <osandov@fb.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Sumanesh Samanta <sumanesh.samanta@broadcom.com>
Cc: Ewan D. Milne <emilne@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: virtualization@lists.linux-foundation.org
Tested-by: Sumanesh Samanta <sumanesh.samanta@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--	lib/sbitmap.c	112
1 file changed, 66 insertions(+), 46 deletions(-)
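The API change for users of the plain sbitmap is visible in the hunks below: sbitmap_init_node() grows an alloc_hint parameter, and sbitmap_get()/sbitmap_get_shallow() stop taking an explicit hint. A minimal sketch of the resulting usage (illustrative only, not part of the commit; depth, shift, and error handling are placeholders):

	#include <linux/sbitmap.h>

	struct sbitmap sb;
	int nr;

	/* depth 128, default shift (-1), percpu alloc hint enabled */
	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
			      false /* round_robin */, true /* alloc_hint */))
		return -ENOMEM;

	nr = sbitmap_get(&sb);		/* per-CPU hint is consulted internally */
	if (nr >= 0)
		sbitmap_clear_bit(&sb, nr);	/* simplified release path */

	sbitmap_free(&sb);		/* also frees the percpu hint */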
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2b43a6aefec3..e395435654aa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -9,52 +9,51 @@
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
-static int init_alloc_hint(struct sbitmap_queue *sbq, gfp_t flags)
+static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
- unsigned depth = sbq->sb.depth;
+ unsigned depth = sb->depth;
- sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
- if (!sbq->alloc_hint)
+ sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+ if (!sb->alloc_hint)
return -ENOMEM;
- if (depth && !sbq->sb.round_robin) {
+ if (depth && !sb->round_robin) {
int i;
for_each_possible_cpu(i)
- *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
+ *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
}
-
return 0;
}
-static inline unsigned update_alloc_hint_before_get(struct sbitmap_queue *sbq,
+static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
unsigned int depth)
{
unsigned hint;
- hint = this_cpu_read(*sbq->alloc_hint);
+ hint = this_cpu_read(*sb->alloc_hint);
if (unlikely(hint >= depth)) {
hint = depth ? prandom_u32() % depth : 0;
- this_cpu_write(*sbq->alloc_hint, hint);
+ this_cpu_write(*sb->alloc_hint, hint);
}
return hint;
}
-static inline void update_alloc_hint_after_get(struct sbitmap_queue *sbq,
+static inline void update_alloc_hint_after_get(struct sbitmap *sb,
unsigned int depth,
unsigned int hint,
unsigned int nr)
{
if (nr == -1) {
/* If the map is full, a hint won't do us much good. */
- this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->sb.round_robin)) {
+ this_cpu_write(*sb->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sb->round_robin)) {
/* Only update the hint if we used it. */
hint = nr + 1;
if (hint >= depth - 1)
hint = 0;
- this_cpu_write(*sbq->alloc_hint, hint);
+ this_cpu_write(*sb->alloc_hint, hint);
}
}
@@ -82,7 +81,8 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node, bool round_robin)
+ gfp_t flags, int node, bool round_robin,
+ bool alloc_hint)
{
unsigned int bits_per_word;
unsigned int i;
@@ -114,9 +114,18 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
return 0;
}
+ if (alloc_hint) {
+ if (init_alloc_hint(sb, flags))
+ return -ENOMEM;
+ } else {
+ sb->alloc_hint = NULL;
+ }
+
sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
- if (!sb->map)
+ if (!sb->map) {
+ free_percpu(sb->alloc_hint);
return -ENOMEM;
+ }
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
@@ -196,7 +205,7 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
return nr;
}
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
unsigned int i, index;
int nr = -1;
@@ -228,10 +237,27 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
return nr;
}
+
+int sbitmap_get(struct sbitmap *sb)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get(sb, hint);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
EXPORT_SYMBOL_GPL(sbitmap_get);
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth)
+static int __sbitmap_get_shallow(struct sbitmap *sb,
+ unsigned int alloc_hint,
+ unsigned long shallow_depth)
{
unsigned int i, index;
int nr = -1;
@@ -263,6 +289,22 @@ again:
return nr;
}
+
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
bool sbitmap_any_bit_set(const struct sbitmap *sb)
@@ -400,15 +442,10 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int i;
ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
- round_robin);
+ round_robin, true);
if (ret)
return ret;
- if (init_alloc_hint(sbq, flags) != 0) {
- sbitmap_free(&sbq->sb);
- return -ENOMEM;
- }
-
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
@@ -416,7 +453,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
return -ENOMEM;
}
@@ -458,32 +494,16 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
- unsigned int hint, depth;
- int nr;
-
- depth = READ_ONCE(sbq->sb.depth);
- hint = update_alloc_hint_before_get(sbq, depth);
- nr = sbitmap_get(&sbq->sb, hint);
- update_alloc_hint_after_get(sbq, depth, hint, nr);
-
- return nr;
+ return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth)
{
- unsigned int hint, depth;
- int nr;
-
WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
- depth = READ_ONCE(sbq->sb.depth);
- hint = update_alloc_hint_before_get(sbq, depth);
- nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
- update_alloc_hint_after_get(sbq, depth, hint, nr);
-
- return nr;
+ return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
@@ -592,7 +612,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
sbitmap_queue_wake_up(sbq);
if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
- *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
+ *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
@@ -630,7 +650,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
if (!first)
seq_puts(m, ", ");
first = false;
- seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+ seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
}
seq_puts(m, "}\n");
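For sbitmap_queue users nothing changes at the call sites: as the last hunks show, __sbitmap_queue_get() and __sbitmap_queue_get_shallow() become thin wrappers, with the hint read and update now handled inside sbitmap_get(). A rough sketch of the queue fast path after this patch (illustrative only; in real drivers the CPU is normally recorded when the tag is allocated):

	#include <linux/sbitmap.h>

	struct sbitmap_queue sbq;
	int tag;

	if (sbitmap_queue_init_node(&sbq, 128, -1, false /* round_robin */,
				    GFP_KERNEL, NUMA_NO_NODE))
		return -ENOMEM;

	tag = __sbitmap_queue_get(&sbq);	/* hint lives in sbq.sb now */
	if (tag >= 0)
		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);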