 block/blk-mq-debugfs.c |  1 -
 block/blk-mq.c         | 10 ++++------
 block/blk-stat.c       | 18 ++++++++++++++++++
 block/blk-stat.h       |  1 +
 block/blk-sysfs.c      |  3 ++-
 include/linux/blkdev.h |  3 +--
 6 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4f2cf8399f3d..f4022b198580 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -122,7 +122,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(FUA),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
- QUEUE_FLAG_NAME(POLL_STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(PCI_P2PDMA),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3af88ffc9e2c..7cd408408a37 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4581,11 +4581,10 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
- if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
- blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
+ if (q->poll_stat)
return true;
- blk_stat_add_callback(q, q->poll_cb);
- return false;
+
+ return blk_stats_alloc_enable(q);
}
static void blk_mq_poll_stats_start(struct request_queue *q)
@@ -4594,8 +4593,7 @@ static void blk_mq_poll_stats_start(struct request_queue *q)
* We don't arm the callback if polling stats are not enabled or the
* callback is already active.
*/
- if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
- blk_stat_is_active(q->poll_cb))
+ if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
return;
blk_stat_activate_msecs(q->poll_cb, 100);
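
Note: the arming logic above keeps two independent gates: polling stats must be enabled (after this patch, q->poll_stat being non-NULL) and the callback must not already have an active window. A minimal user-space sketch of that double-gate pattern, using C11 atomics instead of the kernel's blk_stat machinery (every name below is illustrative, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(void *) poll_stat;        /* non-NULL once stats are enabled */
static atomic_bool callback_active;      /* true while a window is armed    */

/* Arm a stats window only if stats are enabled and none is active yet. */
static void poll_stats_start(void)
{
        if (atomic_load(&poll_stat) == NULL ||
            atomic_exchange(&callback_active, true))
                return;                  /* disabled, or already armed */

        printf("armed a 100 ms stats window\n");
        /* A real timer callback would clear callback_active when it fires. */
}

In the kernel code the second gate is blk_stat_is_active(q->poll_cb), so the 100 ms window is never armed twice.
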
diff --git a/block/blk-stat.c b/block/blk-stat.c
index ae3dd1fb8e61..efb2a80db906 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -219,3 +219,21 @@ void blk_free_queue_stats(struct blk_queue_stats *stats)
kfree(stats);
}
+
+bool blk_stats_alloc_enable(struct request_queue *q)
+{
+ struct blk_rq_stat *poll_stat;
+
+ poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
+ GFP_ATOMIC);
+ if (!poll_stat)
+ return false;
+
+ if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
+ kfree(poll_stat);
+ return true;
+ }
+
+ blk_stat_add_callback(q, q->poll_cb);
+ return false;
+}
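
Note: the new helper makes q->poll_stat itself the "stats enabled" indicator, replacing the QUEUE_FLAG_POLL_STATS bit that the rest of the patch removes: cmpxchg() publishes the freshly allocated buckets only if no other caller got there first, the loser frees its copy and reports "already enabled", and only the winner registers q->poll_cb. A minimal user-space sketch of the same allocate-once pattern, using C11 atomics and a single bucket in place of the kernel's cmpxchg()/kcalloc() (names and struct layout below are illustrative, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct stats { unsigned long mean, nr_samples; };   /* stand-in for the blk_rq_stat buckets */
static _Atomic(struct stats *) poll_stat;           /* NULL until the first enable          */

/* Returns true if stats were already enabled, false otherwise,
 * mirroring the return convention of blk_stats_alloc_enable(). */
static bool stats_alloc_enable(void)
{
        struct stats *expected = NULL;
        struct stats *buf = calloc(1, sizeof(*buf));

        if (!buf)
                return false;            /* allocation failed, stats stay off */

        /* Publish our buffer only if nobody beat us to it. */
        if (!atomic_compare_exchange_strong(&poll_stat, &expected, buf)) {
                free(buf);               /* lost the race: another caller won */
                return true;             /* stats were already enabled        */
        }

        /* Winner: this is where the real helper adds q->poll_cb. */
        return false;
}

Using the pointer as the flag is what lets QUEUE_FLAG_POLL_STATS disappear from blk-mq-debugfs.c and blkdev.h below, and it is why blk_release_queue() can simply test q->poll_stat before removing the callback and then kfree() it unconditionally.
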
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17b47a86eefb..58f029af49e5 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -64,6 +64,7 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
+bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cd75b0f73dc6..c079be1c58a3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -785,11 +785,12 @@ static void blk_release_queue(struct kobject *kobj)
might_sleep();
- if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
+ if (q->poll_stat)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
blk_free_queue_stats(q->stats);
+ kfree(q->poll_stat);
blk_exit_queue(q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bd4370baccca..74118e67f649 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -267,7 +267,7 @@ struct request_queue {
int poll_nsec;
struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+ struct blk_rq_stat *poll_stat;
struct timer_list timeout;
struct work_struct timeout_work;
@@ -397,7 +397,6 @@ struct request_queue {
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */