author    Jens Axboe <axboe@kernel.dk>    2018-11-29 12:35:16 -0700
committer Jens Axboe <axboe@kernel.dk>    2018-11-29 13:58:34 -0700
commit    27fae429acee1e9418059e7fa545438075af5256 (patch)
tree      9d1258df6416f59265bfca42b13c46576963bb9b /lib
parent    b2c5d16b72df1116f05c9be16a630ac939d34101 (diff)
sbitmap: don't loop for find_next_zero_bit() for !round_robin
If we aren't forced to do round robin tag allocation, just use the
allocation hint to find the index for the tag word, don't use it for the
offset inside the word. This avoids a potential extra round trip in the
bit looping, and since we're fetching this cacheline, we may as well
check the whole word from the start.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
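For context: a tag number maps to a (word index, bit offset) pair via the
map's shift, using the SB_NR_TO_INDEX()/SB_NR_TO_BIT() helpers from
include/linux/sbitmap.h. A minimal standalone sketch of that split (toy
struct and example shift value, not the kernel code itself):

	#include <stdio.h>

	/* Toy model of the sbitmap hint split; mirrors the shift/mask done
	 * by SB_NR_TO_INDEX()/SB_NR_TO_BIT(), but 'shift' here is just an
	 * example value, not computed the way the kernel picks it. */
	struct toy_sbitmap {
		unsigned int shift;	/* log2 of bits per word */
	};

	static unsigned int sb_nr_to_index(const struct toy_sbitmap *sb,
					   unsigned int bitnr)
	{
		return bitnr >> sb->shift;	/* which word holds the bit */
	}

	static unsigned int sb_nr_to_bit(const struct toy_sbitmap *sb,
					 unsigned int bitnr)
	{
		return bitnr & ((1U << sb->shift) - 1U); /* offset in that word */
	}

	int main(void)
	{
		struct toy_sbitmap sb = { .shift = 6 };	/* 64 bits per word */
		unsigned int hint = 200;

		/* With this patch, the non-round-robin path keeps only the
		 * word index (3 here) and scans that word from bit 0,
		 * instead of starting find_next_zero_bit() at offset 8. */
		printf("word %u, bit %u\n", sb_nr_to_index(&sb, hint),
		       sb_nr_to_bit(&sb, hint));
		return 0;
	}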
Diffstat (limited to 'lib')
-rw-r--r--	lib/sbitmap.c	| 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8aa8ac6..45cab6bbc1c7 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -118,10 +118,19 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
+	/*
+	 * Unless we're doing round robin tag allocation, just use the
+	 * alloc_hint to find the right word index. No point in looping
+	 * twice in find_next_zero_bit() for that case.
+	 */
+	if (round_robin)
+		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+	else
+		alloc_hint = 0;
+
 	for (i = 0; i < sb->map_nr; i++) {
 		nr = __sbitmap_get_word(&sb->map[index].word,
-					sb->map[index].depth,
-					SB_NR_TO_BIT(sb, alloc_hint),
+					sb->map[index].depth, alloc_hint,
 					!round_robin);
 		if (nr != -1) {
 			nr += index << sb->shift;
@@ -129,13 +138,9 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 		}
 
 		/* Jump to next index. */
-		index++;
-		alloc_hint = index << sb->shift;
-
-		if (index >= sb->map_nr) {
+		alloc_hint = 0;
+		if (++index >= sb->map_nr)
 			index = 0;
-			alloc_hint = 0;
-		}
 	}
 
 	return nr;
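Why starting at bit 0 helps: the word scan begins find_next_zero_bit() at
the given offset and, when the bitmap is allowed to wrap (the
!round_robin case), a failed scan from a nonzero offset has to walk the
same word a second time from bit 0. A hedged userspace sketch of that
wrap-around search (toy find_next_zero_bit() and helper, not the kernel's
__sbitmap_get_word() implementation):

	#include <stdio.h>

	#define WORD_BITS 64

	/* Toy stand-in for the kernel's find_next_zero_bit(): returns the
	 * first clear bit at or after 'start', or 'depth' if none. */
	static unsigned int find_next_zero_bit(unsigned long word,
					       unsigned int depth,
					       unsigned int start)
	{
		for (unsigned int i = start; i < depth; i++)
			if (!(word & (1UL << i)))
				return i;
		return depth;
	}

	/* Sketch of the wrapping search: if the scan from 'hint' fails and
	 * we started past bit 0, retry once from the beginning. With the
	 * patch, the non-round-robin path passes hint == 0 and never needs
	 * the retry. */
	static int get_bit(unsigned long word, unsigned int depth,
			   unsigned int hint, int wrap)
	{
		unsigned int nr = find_next_zero_bit(word, depth, hint);

		if (nr >= depth) {
			if (!wrap || !hint)
				return -1;
			/* Second walk over a cacheline we already fetched. */
			nr = find_next_zero_bit(word, depth, 0);
			if (nr >= depth)
				return -1;
		}
		return nr;
	}

	int main(void)
	{
		/* Only bit 2 is free; a hint of 40 forces the second scan. */
		unsigned long word = ~0UL & ~(1UL << 2);

		printf("hint 40 -> bit %d (two scans)\n",
		       get_bit(word, WORD_BITS, 40, 1));
		printf("hint  0 -> bit %d (one scan)\n",
		       get_bit(word, WORD_BITS, 0, 1));
		return 0;
	}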