author    Jens Axboe <axboe@kernel.dk>    2022-09-04 06:39:25 -0600
committer Jens Axboe <axboe@kernel.dk>    2022-09-04 06:39:25 -0600
commit    bce1b56c73826fec8caf6187f0c922ede397a5a8
tree      ef141b92117fd0bc0d23b891268b44fd7f230381
parent    12c5b70c1897288ee6c841b5cc3ff4d27d511bd1
Revert "sbitmap: fix batched wait_cnt accounting"
This reverts commit 16ede66973c84f890c03584f79158dd5b2d725f5.

This is causing issues with CPU stalls on my test box, revert it for
now until we understand what is going on. It looks like infinite
looping off sbitmap_queue_wake_up(), but hard to tell with a lot of
CPUs hitting this issue and the console scrolling infinitely.

Link: https://lore.kernel.org/linux-block/e742813b-ce5c-0d58-205b-1626f639b1bd@kernel.dk/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
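For readers unfamiliar with the call blamed above: as the lib/sbitmap.c hunk further down shows, sbitmap_queue_wake_up() is simply a loop that calls __sbq_wake_up() until it returns false. The stand-alone sketch below only illustrates that loop shape and why a step function that never reports completion would stall the CPU; it is not the kernel code, and wake_up_step()/budget are invented names for this illustration (the actual cause is, per the message above, not yet understood).

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustration only (hypothetical helper, not kernel code): the outer
 * loop mirrors the shape of sbitmap_queue_wake_up() in the diff below,
 * which keeps calling the wakeup step until the step reports that no
 * further wakeups are needed.  If the step kept returning true, the
 * caller would spin here forever -- the behaviour the commit message
 * suspects.  The 'budget' counter is an artificial cut-off so this
 * demo always terminates.
 */
static bool wake_up_step(int *budget)
{
	return (*budget)-- > 0;		/* stand-in for __sbq_wake_up() */
}

int main(void)
{
	int budget = 3;
	int iterations = 0;

	while (wake_up_step(&budget))
		iterations++;

	printf("wakeup loop ran %d times\n", iterations);
	return 0;
}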
-rw-r--r--  block/blk-mq-tag.c       |  2
-rw-r--r--  include/linux/sbitmap.h  |  3
-rw-r--r--  lib/sbitmap.c            | 31
3 files changed, 16 insertions, 20 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9eb968e14d31..8e3b36d1cb57 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * other allocations on previous queue won't be starved.
 		 */
 		if (bt != bt_prev)
-			sbitmap_queue_wake_up(bt_prev, 1);
+			sbitmap_queue_wake_up(bt_prev);
 
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 4d2d5205ab58..8f5a86e210b9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -575,9 +575,8 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
- * @nr: Number of bits cleared.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2fedf07a9db5..a39b1a877366 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -599,38 +599,34 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 	return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int nr)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 {
 	struct sbq_wait_state *ws;
-	int wake_batch, wait_cnt, cur;
+	unsigned int wake_batch;
+	int wait_cnt;
 
 	ws = sbq_wake_ptr(sbq);
-	if (!ws || !nr)
+	if (!ws)
 		return false;
 
-	wake_batch = READ_ONCE(sbq->wake_batch);
-	cur = atomic_read(&ws->wait_cnt);
-	do {
-		if (cur <= 0)
-			return true;
-		wait_cnt = cur - nr;
-	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
-
+	wait_cnt = atomic_dec_return(&ws->wait_cnt);
 	/*
 	 * For concurrent callers of this, callers should call this function
 	 * again to wakeup a new batch on a different 'ws'.
 	 */
-	if (!waitqueue_active(&ws->wait))
+	if (wait_cnt < 0 || !waitqueue_active(&ws->wait))
 		return true;
 
 	if (wait_cnt > 0)
 		return false;
 
+	wake_batch = READ_ONCE(sbq->wake_batch);
+
 	/*
 	 * Wake up first in case that concurrent callers decrease wait_cnt
 	 * while waitqueue is empty.
 	 */
-	wake_up_nr(&ws->wait, max(wake_batch, nr));
+	wake_up_nr(&ws->wait, wake_batch);
 
 	/*
 	 * Pairs with the memory barrier in sbitmap_queue_resize() to
@@ -655,11 +651,12 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq, int nr)
 	return false;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 {
-	while (__sbq_wake_up(sbq, nr))
+	while (__sbq_wake_up(sbq))
 		;
 }
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
 static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
 {
@@ -696,7 +693,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
 		atomic_long_andnot(mask, (atomic_long_t *) addr);
 
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq, nr_tags);
+	sbitmap_queue_wake_up(sbq);
 	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
 					tags[nr_tags - 1] - offset);
 }
@@ -724,7 +721,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 	 * waiter. See the comment on waitqueue_active().
 	 */
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq, 1);
+	sbitmap_queue_wake_up(sbq);
 	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
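As a side-by-side view of what the lib/sbitmap.c hunk reverts, here is a rough stand-alone sketch of the per-bit wait_cnt decrement being restored versus the batched compare-and-swap decrement being removed. It is not kernel code: C11 atomics stand in for the kernel's atomic_t helpers, and the function names dec_one() and dec_batch() are invented for this illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Restored scheme: every cleared bit steps wait_cnt down by one,
 * mirroring atomic_dec_return(&ws->wait_cnt) in the '+' lines above.
 */
static int dec_one(atomic_int *wait_cnt)
{
	return atomic_fetch_sub(wait_cnt, 1) - 1;	/* new value after the decrement */
}

/* Reverted scheme: one caller subtracts a whole batch of 'nr' cleared
 * bits at once, looping on compare-and-swap the way the removed
 * atomic_try_cmpxchg() loop did, and bailing out once the counter is
 * already non-positive.
 */
static int dec_batch(atomic_int *wait_cnt, int nr)
{
	int cur = atomic_load(wait_cnt);
	int next;

	do {
		if (cur <= 0)
			return cur;	/* another caller already owns the wakeup */
		next = cur - nr;
	} while (!atomic_compare_exchange_weak(wait_cnt, &cur, next));

	return next;
}

int main(void)
{
	atomic_int a = 8, b = 8;

	printf("per-bit decrement leaves %d\n", dec_one(&a));
	printf("batched decrement (nr=8) leaves %d\n", dec_batch(&b, 8));
	return 0;
}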