path: root/lib/sbitmap.c
author     Omar Sandoval <osandov@fb.com>  2016-09-17 01:28:26 -0700
committer  Jens Axboe <axboe@fb.com>       2016-09-17 08:39:16 -0600
commit     05fd095d53b979878f016c3a7080d3683cc89d72 (patch)
tree       93751c669d2cba783d4762127efa949ad4bda5f9 /lib/sbitmap.c
parent     98d95416dbfaf4910caadfb4ddc75e4aacbdff8c (diff)

sbitmap: re-initialize allocation hints after resize
After a struct sbitmap_queue is resized smaller, the allocation hints may still be set to bits beyond the new depth of the bitmap. This means that, for example, if the number of blk-mq tags is reduced through sysfs, more requests than the nominal queue depth may be in flight.

It's tempting to fix this at resize time by doing a one-time reinitialization of the hints, but this can race with __sbitmap_queue_get() updating the hint. Instead, check the hint before we use it. This caused no measurable performance difference in my synthetic benchmarks.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
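To illustrate the pattern outside the kernel, here is a minimal user-space C sketch of the same idea: validate a cached allocation hint against the current depth before using it, and re-seed it to a random in-range position if a shrink has left it out of bounds. The names below (struct tag_pool, pool_get) are illustrative, not sbitmap API; rand() stands in for prandom_u32(), a plain load for READ_ONCE(), and the hint update is simplified relative to __sbitmap_queue_get().

    /*
     * Minimal sketch of the hint-validation pattern described above.
     * Illustrative only; not the kernel's sbitmap implementation.
     */
    #include <stdlib.h>

    struct tag_pool {
            unsigned int depth;     /* number of usable tags; may shrink at runtime */
            unsigned int hint;      /* cached starting position for the next search */
            unsigned char *in_use;  /* in_use[i] != 0 if tag i is allocated */
    };

    int pool_get(struct tag_pool *p)
    {
            unsigned int depth = p->depth;  /* snapshot once, like READ_ONCE() */
            unsigned int hint = p->hint;
            unsigned int i;

            /* A shrink may have left the hint past the end; re-seed it in range. */
            if (hint >= depth)
                    hint = depth ? (unsigned int)rand() % depth : 0;

            for (i = 0; i < depth; i++) {
                    unsigned int nr = (hint + i) % depth;

                    if (!p->in_use[nr]) {
                            p->in_use[nr] = 1;
                            /* Advance the hint past the tag we just took. */
                            p->hint = (nr + 1 < depth) ? nr + 1 : 0;
                            return (int)nr;
                    }
            }
            return -1;      /* pool exhausted */
    }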
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--  lib/sbitmap.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 928b82a733f2..f736c52a712c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -246,10 +246,15 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
 
 int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 {
-	unsigned int hint;
+	unsigned int hint, depth;
 	int nr;
 
 	hint = this_cpu_read(*sbq->alloc_hint);
+	depth = READ_ONCE(sbq->sb.depth);
+	if (unlikely(hint >= depth)) {
+		hint = depth ? prandom_u32() % depth : 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
 	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
 
 	if (nr == -1) {
@@ -258,7 +263,7 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 	} else if (nr == hint || unlikely(sbq->round_robin)) {
 		/* Only update the hint if we used it. */
 		hint = nr + 1;
-		if (hint >= sbq->sb.depth - 1)
+		if (hint >= depth - 1)
 			hint = 0;
 		this_cpu_write(*sbq->alloc_hint, hint);
 	}
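Two details of the fix seem worth noting. The depth is read once with READ_ONCE() and that same snapshot is reused in the wraparound check further down, so a concurrent resize cannot make the two comparisons disagree within a single call. An out-of-range hint is also re-seeded with prandom_u32() % depth rather than simply reset to 0, which presumably keeps the per-CPU hints spread across the bitmap in the same way they are seeded at initialization time.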