author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 12:57:51 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 12:57:51 -0800
commit     ac7ac4618cf25e0d5cd8eba83d5f600084b65b9a (patch)
tree       e5d28907ff72690a0463a2238b96202d751a535c /lib
parent     48aba79bcf6ea05148dc82ad9c40713960b00396 (diff)
parent     fa94ba8a7b22890e6a17b39b9359e114fe18cd59 (diff)
Merge tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Another series of killing more code than what is being added, again
  thanks to Christoph's relentless cleanups and tech debt tackling.

  This contains:

   - blk-iocost improvements (Baolin Wang)
   - part0 iostat fix (Jeffle Xu)
   - Disable iopoll for split bios (Jeffle Xu)
   - block tracepoint cleanups (Christoph Hellwig)
   - Merging of struct block_device and hd_struct (Christoph Hellwig)
   - Rework/cleanup of how block device sizes are updated (Christoph Hellwig)
   - Simplification of gendisk lookup and removal of block device aliasing (Christoph Hellwig)
   - Block device ioctl cleanups (Christoph Hellwig)
   - Removal of bdget()/blkdev_get() as exported API (Christoph Hellwig)
   - Disk change rework, avoid ->revalidate_disk() (Christoph Hellwig)
   - sbitmap improvements (Pavel Begunkov)
   - Hybrid polling fix (Pavel Begunkov)
   - bvec iteration improvements (Pavel Begunkov)
   - Zone revalidation fixes (Damien Le Moal)
   - blk-throttle limit fix (Yu Kuai)
   - Various little fixes"

* tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block: (126 commits)
  blk-mq: fix msec comment from micro to milli seconds
  blk-mq: update arg in comment of blk_mq_map_queue
  blk-mq: add helper allocating tagset->tags
  Revert "block: Fix a lockdep complaint triggered by request queue flushing"
  nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
  blk-mq: add new API of blk_mq_hctx_set_fq_lock_class
  block: disable iopoll for split bio
  block: Improve blk_revalidate_disk_zones() checks
  sbitmap: simplify wrap check
  sbitmap: replace CAS with atomic and
  sbitmap: remove swap_lock
  sbitmap: optimise sbitmap_deferred_clear()
  blk-mq: skip hybrid polling if iopoll doesn't spin
  blk-iocost: Factor out the base vrate change into a separate function
  blk-iocost: Factor out the active iocgs' state check into a separate function
  blk-iocost: Move the usage ratio calculation to the correct place
  blk-iocost: Remove unnecessary advance declaration
  blk-iocost: Fix some typos in comments
  blktrace: fix up a kerneldoc comment
  block: remove the request_queue to argument request based tracepoints
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/sbitmap.c  44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 267aa7709416..d693d9213ceb 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -12,33 +12,24 @@
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
- unsigned long mask, val;
- bool ret = false;
- unsigned long flags;
+ unsigned long mask;
- spin_lock_irqsave(&sb->map[index].swap_lock, flags);
-
- if (!sb->map[index].cleared)
- goto out_unlock;
+ if (!READ_ONCE(map->cleared))
+ return false;
/*
* First get a stable cleared mask, setting the old mask to 0.
*/
- mask = xchg(&sb->map[index].cleared, 0);
+ mask = xchg(&map->cleared, 0);
/*
* Now clear the masked bits in our free word
*/
- do {
- val = sb->map[index].word;
- } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
-
- ret = true;
-out_unlock:
- spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
- return ret;
+ atomic_long_andnot(mask, (atomic_long_t *)&map->word);
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
+ return true;
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -80,7 +71,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
depth -= sb->map[i].depth;
- spin_lock_init(&sb->map[i].swap_lock);
}
return 0;
}
@@ -92,7 +82,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
- sbitmap_deferred_clear(sb, i);
+ sbitmap_deferred_clear(&sb->map[i]);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -107,9 +97,11 @@ EXPORT_SYMBOL_GPL(sbitmap_resize);
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
unsigned int hint, bool wrap)
{
- unsigned int orig_hint = hint;
int nr;
+ /* don't wrap if starting from 0 */
+ wrap = wrap && hint;
+
while (1) {
nr = find_next_zero_bit(word, depth, hint);
if (unlikely(nr >= depth)) {
@@ -118,8 +110,8 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (orig_hint && hint && wrap) {
- hint = orig_hint = 0;
+ if (hint && wrap) {
+ hint = 0;
continue;
}
return -1;
@@ -139,15 +131,15 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
unsigned int alloc_hint, bool round_robin)
{
+ struct sbitmap_word *map = &sb->map[index];
int nr;
do {
- nr = __sbitmap_get_word(&sb->map[index].word,
- sb->map[index].depth, alloc_hint,
+ nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
!round_robin);
if (nr != -1)
break;
- if (!sbitmap_deferred_clear(sb, index))
+ if (!sbitmap_deferred_clear(map))
break;
} while (1);
@@ -207,7 +199,7 @@ again:
break;
}
- if (sbitmap_deferred_clear(sb, index))
+ if (sbitmap_deferred_clear(&sb->map[index]))
goto again;
/* Jump to next index. */
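
For readers following the first hunk outside the kernel tree: the old code took swap_lock and spun in a cmpxchg() loop to clear the batched bits, while the new code needs neither, because xchg() atomically snapshots and resets the deferred mask and a single atomic AND-NOT applies it to the free word. Below is a minimal sketch of the same pattern with C11 atomics, assuming a userspace build; word_pair and deferred_clear are illustrative names, not kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>

    /*
     * Rough userspace analogue of the reworked sbitmap_deferred_clear().
     * C11 atomic_exchange() stands in for the kernel's xchg(), and
     * atomic_fetch_and() for atomic_long_andnot().
     */
    struct word_pair {
    	atomic_ulong word;	/* allocated bits */
    	atomic_ulong cleared;	/* bits freed since the last batch move */
    };

    static bool deferred_clear(struct word_pair *map)
    {
    	unsigned long mask;

    	/* Cheap racy check first, like the kernel's READ_ONCE(). */
    	if (atomic_load_explicit(&map->cleared, memory_order_relaxed) == 0)
    		return false;

    	/* Atomically snapshot the cleared bits, resetting the mask to 0. */
    	mask = atomic_exchange(&map->cleared, 0);

    	/* Drop those bits from the free word in one atomic AND-NOT. */
    	atomic_fetch_and(&map->word, ~mask);
    	return true;
    }

The kernel's BUILD_BUG_ON() guards the cast of map->word to atomic_long_t; the sketch sidesteps that concern by declaring the field atomic in the first place.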
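The "sbitmap: simplify wrap check" hunks hoist the rescan decision out of the loop: wrap = wrap && hint records up front that a scan starting at bit 0 never needs a second pass, which lets orig_hint disappear. A sketch of just that search logic for a single word of up to BITS_PER_LONG bits follows; find_zero_bit is a made-up name, and the kernel's atomic claim of the found bit is omitted.

    /*
     * Find a zero bit in 'word', scanning from 'hint'; wrap back to bit 0
     * at most once, and only if the scan did not already start there.
     */
    static int find_zero_bit(unsigned long word, unsigned int depth,
    			 unsigned int hint, bool wrap)
    {
    	unsigned int nr;

    	/* A scan that starts at bit 0 already covers the whole word. */
    	wrap = wrap && hint;

    	while (1) {
    		for (nr = hint; nr < depth; nr++)
    			if (!(word & (1UL << nr)))
    				return (int)nr;

    		if (!wrap)
    			return -1;
    		/* Second and final pass, from the start of the word. */
    		hint = 0;
    		wrap = false;
    	}
    }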
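Finally, the sbitmap_find_bit_in_index() hunk keeps its original shape: look for a free bit, and when the word appears full, fold in any deferred clears and retry until neither step succeeds. A usage sketch tying the two hypothetical helpers above together (again, not the kernel function):

    /* Retry loop mirroring sbitmap_find_bit_in_index(). */
    static int try_alloc(struct word_pair *map, unsigned int depth,
    		     unsigned int hint, bool wrap)
    {
    	int nr;

    	do {
    		nr = find_zero_bit(atomic_load(&map->word), depth, hint, wrap);
    		if (nr != -1)
    			break;			/* found a candidate bit */
    	} while (deferred_clear(map));		/* word full: batch clears, retry */

    	return nr;
    }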