author     Hannes Reinecke <hare@suse.de>      2023-12-08 13:53:20 +0100
committer  Keith Busch <kbusch@kernel.org>     2024-01-10 13:27:32 -0800
commit     07a29b134ce8e47aef15ea71eab8e6b3734a9720 (patch)
tree       2e072857ee806bdfd582d92ae29b3e8db391edec
parent     06c59d427017fcde3107c236177fcc74c9db7909 (diff)
nvmet-tcp: avoid circular locking dependency on install_queue()
nvmet_tcp_install_queue() is driven from the ->io_work workqueue function, but calls flush_workqueue(), which may trigger ->release_work(), which in turn calls flush_work() on ->io_work. To avoid this circular dependency, check for pending queues of the controller that are in the disconnecting state, and return 'controller busy' once a certain threshold is reached.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
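For reference, a sketch of the call chain behind the circular dependency, reconstructed from the message above (only nvmet_wq, ->io_work and ->release_work are named by the patch; the surrounding steps are assumptions about the connect path):

    ->io_work                                  /* handles the fabrics connect command */
      -> nvmet_tcp_install_queue()
           -> flush_workqueue(nvmet_wq)        /* old code: wait for inflight teardown */
                -> ->release_work              /* teardown work still pending on nvmet_wq */
                     -> flush_work(->io_work)  /* waits on the very context running this chain */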
-rw-r--r--  drivers/nvme/target/tcp.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4dc60cbcb205..6a1e6bb80062 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -25,6 +25,7 @@
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -2067,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
- ret = kernel_listen(port->sock, 128);
+ ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
if (ret) {
pr_err("failed to listen %d on port sock\n", ret);
goto err_sock;
@@ -2133,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
struct nvmet_tcp_queue *queue =
	container_of(sq, struct nvmet_tcp_queue, nvme_sq);
if (sq->qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_tcp_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == sq->ctrl &&
+ q->state == NVMET_TCP_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ if (pending > NVMET_TCP_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
queue->nr_cmds = sq->size * 2;
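With this change the admin-queue connect path no longer flushes nvmet_wq at all; a connect is rejected with NVME_SC_CONNECT_CTRL_BUSY only while more than NVMET_TCP_BACKLOG queues of the same controller are still in NVMET_TCP_Q_DISCONNECTING, reusing the listen backlog as the threshold.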