Diffstat (limited to 'drivers/nvme/host/tcp.c')
-rw-r--r--	drivers/nvme/host/tcp.c	92
1 file changed, 5 insertions(+), 87 deletions(-)
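
The diff below drops nvme-tcp's driver-local I/O queue counting, distribution, and blk-mq mapping helpers in favor of shared fabrics helpers. Judging from the call sites in the diff, the common helpers are declared roughly as follows (a sketch only; the exact signatures, and their assumed home in drivers/nvme/host/fabrics.h, are inferred from how this diff calls them):

/* Assumed declarations of the shared fabrics helpers used below. */
unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts);
void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
			u32 io_queues[HCTX_MAX_TYPES]);
void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES]);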
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bf0230442d57..260b3554d821 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1802,58 +1802,12 @@ out_free_queues:
return ret;
}
-static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
-{
- unsigned int nr_io_queues;
-
- nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
-
- return nr_io_queues;
-}
-
-static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
- unsigned int nr_io_queues)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvmf_ctrl_options *opts = nctrl->opts;
-
- if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
- /*
- * separate read/write queues
- * hand out dedicated default queues only after we have
- * sufficient read queues.
- */
- ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_write_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /*
- * shared read/write queues
- * either no write queues were requested, or we don't have
- * sufficient queue count to have dedicated default queues.
- */
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_io_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- }
-
- if (opts->nr_poll_queues && nr_io_queues) {
- /* map dedicated poll queues only if we have queues left */
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(opts->nr_poll_queues, nr_io_queues);
- }
-}
-
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
unsigned int nr_io_queues;
int ret;
- nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+ nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
ret = nvme_set_queue_count(ctrl, &nr_io_queues);
if (ret)
return ret;
@@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
- nvme_tcp_set_io_queues(ctrl, nr_io_queues);
-
+ nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+ to_tcp_ctrl(ctrl)->io_queues);
return __nvme_tcp_alloc_io_queues(ctrl);
}
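
The allocation path now delegates the queue count and the per-type split to common code. The counting policy itself is visible in the deleted nvme_tcp_nr_io_queues() above; a sketch of what the shared helper presumably looks like, with the name and opts-based signature taken from the call site and the body reconstructed from the removed driver code:

unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
{
	unsigned int nr_io_queues;

	/* cap each queue class at one queue per online CPU */
	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
	nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());

	return nr_io_queues;
}

The sum is still passed through nvme_set_queue_count(), so the controller may grant fewer queues than requested; nvmf_set_io_queues() then redistributes whatever was granted across the default/read/poll types, filling the driver's io_queues[] array.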
@@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-
- if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
- /* separate read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_READ];
- set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /* shared read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_READ].queue_offset = 0;
- }
- blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
- blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
- if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
- /* map dedicated poll queues only if we have queues left */
- set->map[HCTX_TYPE_POLL].nr_queues =
- ctrl->io_queues[HCTX_TYPE_POLL];
- set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT] +
- ctrl->io_queues[HCTX_TYPE_READ];
- blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
- }
-
- dev_info(ctrl->ctrl.device,
- "mapped %d/%d/%d default/read/poll queues.\n",
- ctrl->io_queues[HCTX_TYPE_DEFAULT],
- ctrl->io_queues[HCTX_TYPE_READ],
- ctrl->io_queues[HCTX_TYPE_POLL]);
+
+ nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
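
For the mapping side, the removed nvme_tcp_map_queues() body is what the shared helper is expected to carry, with the driver's io_queues[] array passed in instead of being read through the transport-private ctrl. A sketch of nvmf_map_queues(), reconstructed from the deleted driver-local code above (signature inferred from the call site):

void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES])
{
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT] +
			io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		io_queues[HCTX_TYPE_DEFAULT],
		io_queues[HCTX_TYPE_READ],
		io_queues[HCTX_TYPE_POLL]);
}

With both conversions applied, the only per-transport state left in nvme-tcp is the io_queues[] array itself; the counting, distribution, and blk-mq mapping policy becomes common code that other fabrics transports can share.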