path: root/block/blk-mq-cpumap.c
Diffstat (limited to 'block/blk-mq-cpumap.c')
-rw-r--r--  block/blk-mq-cpumap.c  126
1 file changed, 82 insertions(+), 44 deletions(-)
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 9c2fce1a7b50..705da074ad6c 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -10,66 +10,69 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/group_cpus.h>
+#include <linux/device/bus.h>
+#include <linux/sched/isolation.h>
-#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
-static int queue_index(struct blk_mq_queue_map *qmap,
- unsigned int nr_queues, const int q)
+static unsigned int blk_mq_num_queues(const struct cpumask *mask,
+ unsigned int max_queues)
{
- return qmap->queue_offset + (q % nr_queues);
+ unsigned int num;
+
+ num = cpumask_weight(mask);
+ return min_not_zero(num, max_queues);
}
-static int get_first_sibling(unsigned int cpu)
+/**
+ * blk_mq_num_possible_queues - Calculate the number of queues for multiqueue devices
+ * @max_queues: The maximum number of queues the hardware/driver
+ * supports. If max_queues is 0, the argument is
+ * ignored.
+ *
+ * Calculates the number of queues to be used for a multiqueue
+ * device based on the number of possible CPUs.
+ */
+unsigned int blk_mq_num_possible_queues(unsigned int max_queues)
{
- unsigned int ret;
-
- ret = cpumask_first(topology_sibling_cpumask(cpu));
- if (ret < nr_cpu_ids)
- return ret;
+ return blk_mq_num_queues(cpu_possible_mask, max_queues);
+}
+EXPORT_SYMBOL_GPL(blk_mq_num_possible_queues);
- return cpu;
+/**
+ * blk_mq_num_online_queues - Calculate the number of queues for multiqueue devices
+ * @max_queues: The maximum number of queues the hardware/driver
+ * supports. If max_queues is 0, the argument is
+ * ignored.
+ *
+ * Calculates the number of queues to be used for a multiqueue
+ * device based on the number of online CPUs.
+ */
+unsigned int blk_mq_num_online_queues(unsigned int max_queues)
+{
+ return blk_mq_num_queues(cpu_online_mask, max_queues);
}
+EXPORT_SYMBOL_GPL(blk_mq_num_online_queues);
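A usage sketch for the two helpers above, not part of this patch: a driver
caps its hardware queue count at what the device supports while never
requesting more queues than there are possible CPUs. The foo_ctrl structure
and its max_hw_queues field are hypothetical stand-ins for a driver's
per-device limit.

	#include <linux/blk-mq.h>

	struct foo_ctrl {
		unsigned int max_hw_queues;	/* hypothetical device limit */
	};

	static unsigned int foo_nr_hw_queues(struct foo_ctrl *ctrl)
	{
		/* A max_hw_queues of 0 means "no device-side limit". */
		return blk_mq_num_possible_queues(ctrl->max_hw_queues);
	}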
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
- unsigned int *map = qmap->mq_map;
- unsigned int nr_queues = qmap->nr_queues;
- unsigned int cpu, first_sibling, q = 0;
-
- for_each_possible_cpu(cpu)
- map[cpu] = -1;
-
- /*
- * Spread queues among present CPUs first for minimizing
- * count of dead queues which are mapped by all un-present CPUs
- */
- for_each_present_cpu(cpu) {
- if (q >= nr_queues)
- break;
- map[cpu] = queue_index(qmap, nr_queues, q++);
+ const struct cpumask *masks;
+ unsigned int queue, cpu, nr_masks;
+
+ masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
+ if (!masks) {
+ for_each_possible_cpu(cpu)
+ qmap->mq_map[cpu] = qmap->queue_offset;
+ return;
}
- for_each_possible_cpu(cpu) {
- if (map[cpu] != -1)
- continue;
- /*
- * First do sequential mapping between CPUs and queues.
- * In case we still have CPUs to map, and we have some number of
- * threads per cores then map sibling threads to the same queue
- * for performance optimizations.
- */
- if (q < nr_queues) {
- map[cpu] = queue_index(qmap, nr_queues, q++);
- } else {
- first_sibling = get_first_sibling(cpu);
- if (first_sibling == cpu)
- map[cpu] = queue_index(qmap, nr_queues, q++);
- else
- map[cpu] = map[first_sibling];
- }
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ for_each_cpu(cpu, &masks[queue % nr_masks])
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
+ kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
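The rewritten blk_mq_map_queues() above delegates the topology-aware
spreading to group_cpus_evenly() and then assigns whole CPU groups to
queues round-robin. Here is a userspace model of that loop, for
illustration only: the group[] array stands in for the masks returned by
group_cpus_evenly(), assuming eight possible CPUs split into three groups
and a queue_offset of 0.

	#include <stdio.h>

	int main(void)
	{
		enum { NR_CPUS = 8, NR_QUEUES = 3, NR_MASKS = 3 };
		/* CPUs 0-2 form group 0, CPUs 3-5 group 1, CPUs 6-7 group 2 */
		const int group[NR_CPUS] = { 0, 0, 0, 1, 1, 1, 2, 2 };
		int mq_map[NR_CPUS];

		for (int queue = 0; queue < NR_QUEUES; queue++)
			for (int cpu = 0; cpu < NR_CPUS; cpu++)
				if (group[cpu] == queue % NR_MASKS)
					mq_map[cpu] = queue;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %d -> hw queue %d\n", cpu, mq_map[cpu]);
		return 0;
	}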
@@ -92,3 +95,38 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
return NUMA_NO_NODE;
}
+
+/**
+ * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
+ * @qmap: CPU to hardware queue map
+ * @dev: The device for which to map the queues
+ * @offset: Queue offset to use for the device
+ *
+ * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
+ * irq_get_affinity callback is used to retrieve the IRQ affinity masks.
+ */
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+ struct device *dev, unsigned int offset)
+
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ if (!dev->bus->irq_get_affinity)
+ goto fallback;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = dev->bus->irq_get_affinity(dev, queue + offset);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+ return;
+
+fallback:
+ blk_mq_map_queues(qmap);
+}
+EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
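A sketch of how a driver might wire blk_mq_map_hw_queues() into its
blk_mq_ops->map_queues callback, assuming a device whose bus implements
irq_get_affinity (e.g. PCI) and whose first I/O queue uses IRQ vector 0.
foo_ctrl, its dev field, and foo_map_queues are hypothetical:

	static void foo_map_queues(struct blk_mq_tag_set *set)
	{
		struct foo_ctrl *ctrl = set->driver_data;

		/*
		 * Falls back to blk_mq_map_queues() internally if the
		 * bus cannot report per-vector affinity.
		 */
		blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
				     ctrl->dev, 0 /* first I/O vector */);
	}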