From 8832cf922151e9dfa2821736beb0ae2dd3968b6e Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 21 Mar 2022 13:57:27 +0200
Subject: nvmet: use a private workqueue instead of the system workqueue

Any attempt to flush a kernel-global workqueue can deadlock, so we
should simply stop using them. Instead, introduce nvmet_wq, a generic
nvmet workqueue for work elements that don't explicitly require a
dedicated workqueue (as evidenced by the fact that they currently use
system_wq).

Changes were made using the following substitutions:

 - s/schedule_work(/queue_work(nvmet_wq, /g
 - s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
 - s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g

Reported-by: Tetsuo Handa
Signed-off-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/nvme/target/tcp.c')

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 83ca577f72be..2793554e622e 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1269,7 +1269,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 	spin_lock(&queue->state_lock);
 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
 		queue->state = NVMET_TCP_Q_DISCONNECTING;
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 	spin_unlock(&queue->state_lock);
 }
@@ -1684,7 +1684,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
 		goto out;
 
 	if (sk->sk_state == TCP_LISTEN)
-		schedule_work(&port->accept_work);
+		queue_work(nvmet_wq, &port->accept_work);
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 
 	if (sq->qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	queue->nr_cmds = sq->size * 2;
@@ -1876,12 +1876,12 @@ static void __exit nvmet_tcp_exit(void)
 
 	nvmet_unregister_transport(&nvmet_tcp_ops);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	mutex_unlock(&nvmet_tcp_queue_mutex);
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 
 	destroy_workqueue(nvmet_tcp_wq);
 }
-- 
cgit
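
Because this view is limited to tcp.c, the allocation of nvmet_wq itself
is not shown; in the full commit it happens in drivers/nvme/target/core.c.
Below is a minimal sketch of the private-workqueue pattern the change
relies on. The flag choice, error handling, and the nvmet_init/nvmet_exit
wiring follow common kernel convention and are illustrative, not a quote
of that file:

/*
 * Sketch of allocating a private workqueue such as nvmet_wq. The real
 * allocation lives in drivers/nvme/target/core.c (outside this
 * tcp.c-limited diff); details here follow common kernel convention.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

static int __init nvmet_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure, which the kernel-global system_wq does not.
	 */
	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_wq)
		return -ENOMEM;
	return 0;
}

static void __exit nvmet_exit(void)
{
	/* Waits for all queued work before freeing the workqueue. */
	destroy_workqueue(nvmet_wq);
}

module_init(nvmet_init);
module_exit(nvmet_exit);
MODULE_LICENSE("GPL");

With a dedicated queue, flush_workqueue(nvmet_wq) waits only on nvmet's
own work items rather than on every item queued to the system workqueue,
which is what makes the substitutions above safe against the flush
deadlock the commit message describes.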