Diffstat (limited to 'drivers/nvme/target/fc.c')
-rw-r--r--	drivers/nvme/target/fc.c	1179
1 file changed, 790 insertions(+), 389 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index f98f5c5bea26..0d9784004c9b 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful. - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. - * See the GNU General Public License for more details, a copy of which - * can be found in the file COPYING included with this package - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> @@ -26,6 +14,7 @@ #include "nvmet.h" #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> +#include "../host/fc.h" /* *************************** Data Structures/Defines ****************** */ @@ -33,23 +22,21 @@ #define NVMET_LS_CTX_COUNT 256 -/* for this implementation, assume small single frame rqst/rsp */ -#define NVME_FC_MAX_LS_BUFFER_SIZE 2048 - struct nvmet_fc_tgtport; struct nvmet_fc_tgt_assoc; -struct nvmet_fc_ls_iod { - struct nvmefc_tgt_ls_req *lsreq; +struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ + struct nvmefc_ls_rsp *lsrsp; struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ - struct list_head ls_list; /* tgtport->ls_list */ + struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_tgt_assoc *assoc; + void *hosthandle; - u8 *rqstbuf; - u8 *rspbuf; + union nvmefc_ls_requests *rqstbuf; + union nvmefc_ls_responses *rspbuf; u16 rqstdatalen; dma_addr_t rspdma; @@ -58,6 +45,20 @@ struct nvmet_fc_ls_iod { struct work_struct work; } __aligned(sizeof(unsigned long long)); +struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ + struct nvmefc_ls_req ls_req; + + struct nvmet_fc_tgtport *tgtport; + void *hosthandle; + + int ls_error; + struct list_head lsreq_list; /* tgtport->ls_req_list */ + bool req_queued; + + struct work_struct put_work; +}; + + /* desired maximum for a single sequence - if sg list allows it */ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) @@ -95,7 +96,6 @@ struct nvmet_fc_fcp_iod { }; struct nvmet_fc_tgtport { - struct nvmet_fc_target_port fc_target_port; struct list_head tgt_list; /* nvmet_fc_target_list */ @@ -104,9 +104,11 @@ struct nvmet_fc_tgtport { struct nvmet_fc_ls_iod *iod; spinlock_t lock; - struct list_head ls_list; + struct list_head ls_rcv_list; + struct list_head ls_req_list; struct list_head ls_busylist; struct list_head assoc_list; + struct list_head host_list; struct ida assoc_cnt; struct nvmet_fc_port_entry *pe; struct kref ref; @@ -140,38 +142,36 @@ struct nvmet_fc_tgt_queue { struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvmet_fc_tgt_assoc *assoc; - struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ struct list_head fod_list; struct list_head pending_cmd_list; struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; + /* array of fcp_iods */ + struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */; } __aligned(sizeof(unsigned long long)); +struct nvmet_fc_hostport { + struct nvmet_fc_tgtport *tgtport; + void 
*hosthandle; + struct list_head host_list; + struct kref ref; + u8 invalid; +}; + struct nvmet_fc_tgt_assoc { u64 association_id; u32 a_id; + atomic_t terminating; struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_hostport *hostport; + struct nvmet_fc_ls_iod *rcv_disconn; struct list_head a_list; - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; struct work_struct del_work; }; - -static inline int -nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) -{ - return (iodptr - iodptr->tgtport->iod); -} - -static inline int -nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) -{ - return (fodptr - fodptr->queue->fod); -} - - /* * Association and Connection IDs: * @@ -235,10 +235,20 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); +static void nvmet_fc_put_lsop_work(struct work_struct *work) +{ + struct nvmet_fc_ls_req_op *lsop = + container_of(work, struct nvmet_fc_ls_req_op, put_work); + + nvmet_fc_tgtport_put(lsop->tgtport); + kfree(lsop); +} static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod); static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); +static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod); /* *********************** FC-NVME DMA Handling **************************** */ @@ -330,6 +340,185 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, } +/* ********************** FC-NVME LS XMT Handling ************************* */ + + +static void +__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) +{ + struct nvmet_fc_tgtport *tgtport = lsop->tgtport; + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + + if (!lsop->req_queued) { + spin_unlock_irqrestore(&tgtport->lock, flags); + goto out_putwork; + } + + list_del(&lsop->lsreq_list); + + lsop->req_queued = false; + + spin_unlock_irqrestore(&tgtport->lock, flags); + + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + +out_putwork: + queue_work(nvmet_wq, &lsop->put_work); +} + +static int +__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + int ret = 0; + + if (!tgtport->ops->ls_req) + return -EOPNOTSUPP; + + if (!nvmet_fc_tgtport_get(tgtport)) + return -ESHUTDOWN; + + lsreq->done = done; + lsop->req_queued = false; + INIT_LIST_HEAD(&lsop->lsreq_list); + INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work); + + lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, + lsreq->rqstlen + lsreq->rsplen, + DMA_BIDIRECTIONAL); + if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { + ret = -EFAULT; + goto out_puttgtport; + } + lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; + + spin_lock_irqsave(&tgtport->lock, flags); + + list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); + + lsop->req_queued = true; + + spin_unlock_irqrestore(&tgtport->lock, flags); + + ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, + lsreq); + if (ret) + 
goto out_unlink; + + return 0; + +out_unlink: + lsop->ls_error = ret; + spin_lock_irqsave(&tgtport->lock, flags); + lsop->req_queued = false; + list_del(&lsop->lsreq_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); +out_puttgtport: + nvmet_fc_tgtport_put(tgtport); + + return ret; +} + +static int +nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + /* don't wait for completion */ + + return __nvmet_fc_send_ls_req(tgtport, lsop, done); +} + +static void +nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) +{ + struct nvmet_fc_ls_req_op *lsop = + container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); + + __nvmet_fc_finish_ls_req(lsop); + + /* fc-nvme target doesn't care about success or failure of cmd */ +} + +/* + * This routine sends a FC-NVME LS to disconnect (aka terminate) + * the FC-NVME Association. Terminating the association also + * terminates the FC-NVME connections (per queue, both admin and io + * queues) that are part of the association. E.g. things are torn + * down, and the related FC-NVME Association ID and Connection IDs + * become invalid. + * + * The behavior of the fc-nvme target is such that its + * understanding of the association and connections will implicitly + * be torn down. The action is implicit as it may be due to a loss of + * connectivity with the fc-nvme host, so the target may never get a + * response even if it tried. As such, the action of this routine + * is to asynchronously send the LS, ignore any results of the LS, and + * continue on with terminating the association. If the fc-nvme host + * is present and receives the LS, it too can tear down. + */ +static void +nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; + struct fcnvme_ls_disconnect_assoc_acc *discon_acc; + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + int ret; + + /* + * If ls_req is NULL or no hosthandle, it's an older lldd and no + * message is normal. Otherwise, send unless the hostport has + * already been invalidated by the lldd. 
+ */ + if (!tgtport->ops->ls_req || assoc->hostport->invalid) + return; + + lsop = kzalloc((sizeof(*lsop) + + sizeof(*discon_rqst) + sizeof(*discon_acc) + + tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); + if (!lsop) { + pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n", + tgtport->fc_target_port.port_num, assoc->a_id); + return; + } + + discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; + discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; + lsreq = &lsop->ls_req; + if (tgtport->ops->lsrqst_priv_sz) + lsreq->private = (void *)&discon_acc[1]; + else + lsreq->private = NULL; + + lsop->tgtport = tgtport; + lsop->hosthandle = assoc->hostport->hosthandle; + + nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, + assoc->association_id); + + ret = nvmet_fc_send_ls_req_async(tgtport, lsop, + nvmet_fc_disconnect_assoc_done); + if (ret) { + pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n", + tgtport->fc_target_port.port_num, assoc->a_id, ret); + kfree(lsop); + } +} + + /* *********************** FC-NVME Port Management ************************ */ @@ -349,17 +538,18 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); iod->tgtport = tgtport; - list_add_tail(&iod->ls_list, &tgtport->ls_list); + list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); - iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE, - GFP_KERNEL); + iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + + sizeof(union nvmefc_ls_responses), + GFP_KERNEL); if (!iod->rqstbuf) goto out_fail; - iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE; + iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, - NVME_FC_MAX_LS_BUFFER_SIZE, + sizeof(*iod->rspbuf), DMA_TO_DEVICE); if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) goto out_fail; @@ -369,12 +559,12 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) out_fail: kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); for (iod--, i--; i >= 0; iod--, i--) { fc_dma_unmap_single(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); } kfree(iod); @@ -390,10 +580,10 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { fc_dma_unmap_single(tgtport->dev, - iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE, + iod->rspdma, sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); } kfree(tgtport->iod); } @@ -405,10 +595,10 @@ nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); - iod = list_first_entry_or_null(&tgtport->ls_list, - struct nvmet_fc_ls_iod, ls_list); + iod = list_first_entry_or_null(&tgtport->ls_rcv_list, + struct nvmet_fc_ls_iod, ls_rcv_list); if (iod) - list_move_tail(&iod->ls_list, &tgtport->ls_busylist); + list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); spin_unlock_irqrestore(&tgtport->lock, flags); return iod; } @@ -421,7 +611,7 @@ nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); - list_move(&iod->ls_list, &tgtport->ls_list); + list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 
spin_unlock_irqrestore(&tgtport->lock, flags); } @@ -594,28 +784,21 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize) { struct nvmet_fc_tgt_queue *queue; - unsigned long flags; int ret; if (qid > NVMET_NR_QUEUES) return NULL; - queue = kzalloc((sizeof(*queue) + - (sizeof(struct nvmet_fc_fcp_iod) * sqsize)), - GFP_KERNEL); + queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); if (!queue) return NULL; - if (!nvmet_fc_tgt_a_get(assoc)) - goto out_free_queue; - queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, assoc->tgtport->fc_target_port.port_num, assoc->a_id, qid); if (!queue->work_q) - goto out_a_put; + goto out_free_queue; - queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1]; queue->qid = qid; queue->sqsize = sqsize; queue->assoc = assoc; @@ -631,22 +814,20 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_fail_iodlist; WARN_ON(assoc->queues[qid]); - spin_lock_irqsave(&assoc->tgtport->lock, flags); assoc->queues[qid] = queue; - spin_unlock_irqrestore(&assoc->tgtport->lock, flags); return queue; out_fail_iodlist: + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); -out_a_put: - nvmet_fc_tgt_a_put(assoc); out_free_queue: kfree(queue); return NULL; @@ -658,16 +839,9 @@ nvmet_fc_tgt_queue_free(struct kref *ref) { struct nvmet_fc_tgt_queue *queue = container_of(ref, struct nvmet_fc_tgt_queue, ref); - unsigned long flags; - - spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); - queue->assoc->queues[queue->qid] = NULL; - spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); - nvmet_fc_tgt_a_put(queue->assoc); - destroy_workqueue(queue->work_q); kfree(queue); @@ -693,31 +867,33 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) struct nvmet_fc_fcp_iod *fod = queue->fod; struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; unsigned long flags; - int i, writedataactive; + int i; bool disconnect; disconnect = atomic_xchg(&queue->connected, 0); + /* if not connected, nothing to do */ + if (!disconnect) + return; + spin_lock_irqsave(&queue->qlock, flags); - /* about outstanding io's */ + /* abort outstanding io's */ for (i = 0; i < queue->sqsize; fod++, i++) { if (fod->active) { spin_lock(&fod->flock); fod->abort = true; - writedataactive = fod->writedataactive; - spin_unlock(&fod->flock); /* * only call lldd abort routine if waiting for * writedata. other outstanding ops should finish * on their own. 
*/ - if (writedataactive) { - spin_lock(&fod->flock); + if (fod->writedataactive) { fod->aborted = true; spin_unlock(&fod->flock); tgtport->ops->fcp_abort( &tgtport->fc_target_port, fod->fcpreq); - } + } else + spin_unlock(&fod->flock); } } @@ -757,8 +933,8 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) flush_workqueue(queue->work_q); - if (disconnect) - nvmet_sq_destroy(&queue->nvme_sq); + nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_tgt_q_put(queue); } @@ -771,85 +947,213 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_tgt_queue *queue; u64 association_id = nvmet_fc_getassociationid(connection_id); u16 qid = nvmet_fc_getqueueid(connection_id); - unsigned long flags; if (qid > NVMET_NR_QUEUES) return NULL; - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { queue = assoc->queues[qid]; if (queue && (!atomic_read(&queue->connected) || !nvmet_fc_tgt_q_get(queue))) queue = NULL; - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); return queue; } } + rcu_read_unlock(); + return NULL; +} + +static void +nvmet_fc_hostport_free(struct kref *ref) +{ + struct nvmet_fc_hostport *hostport = + container_of(ref, struct nvmet_fc_hostport, ref); + struct nvmet_fc_tgtport *tgtport = hostport->tgtport; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_del(&hostport->host_list); spin_unlock_irqrestore(&tgtport->lock, flags); + if (tgtport->ops->host_release && hostport->invalid) + tgtport->ops->host_release(hostport->hosthandle); + kfree(hostport); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) +{ + kref_put(&hostport->ref, nvmet_fc_hostport_free); +} + +static int +nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) +{ + return kref_get_unless_zero(&hostport->ref); +} + +static struct nvmet_fc_hostport * +nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) +{ + struct nvmet_fc_hostport *host; + + lockdep_assert_held(&tgtport->lock); + + list_for_each_entry(host, &tgtport->host_list, host_list) { + if (host->hosthandle == hosthandle && !host->invalid) { + if (nvmet_fc_hostport_get(host)) + return host; + } + } + return NULL; } +static struct nvmet_fc_hostport * +nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) +{ + struct nvmet_fc_hostport *newhost, *match = NULL; + unsigned long flags; + + /* + * Caller holds a reference on tgtport. 
+ */ + + /* if LLDD not implemented, leave as NULL */ + if (!hosthandle) + return NULL; + + spin_lock_irqsave(&tgtport->lock, flags); + match = nvmet_fc_match_hostport(tgtport, hosthandle); + spin_unlock_irqrestore(&tgtport->lock, flags); + + if (match) + return match; + + newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); + if (!newhost) + return ERR_PTR(-ENOMEM); + + spin_lock_irqsave(&tgtport->lock, flags); + match = nvmet_fc_match_hostport(tgtport, hosthandle); + if (match) { + /* new allocation not needed */ + kfree(newhost); + newhost = match; + } else { + nvmet_fc_tgtport_get(tgtport); + newhost->tgtport = tgtport; + newhost->hosthandle = hosthandle; + INIT_LIST_HEAD(&newhost->host_list); + kref_init(&newhost->ref); + + list_add_tail(&newhost->host_list, &tgtport->host_list); + } + spin_unlock_irqrestore(&tgtport->lock, flags); + + return newhost; +} + static void -nvmet_fc_delete_assoc(struct work_struct *work) +nvmet_fc_delete_assoc_work(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; nvmet_fc_delete_target_assoc(assoc); nvmet_fc_tgt_a_put(assoc); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + int terminating; + + terminating = atomic_xchg(&assoc->terminating, 1); + + /* if already terminating, do nothing */ + if (terminating) + return; + + nvmet_fc_tgtport_get(assoc->tgtport); + if (!queue_work(nvmet_wq, &assoc->del_work)) + nvmet_fc_tgtport_put(assoc->tgtport); +} + +static bool +nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id) +{ + struct nvmet_fc_tgt_assoc *a; + bool found = false; + + rcu_read_lock(); + list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { + if (association_id == a->association_id) { + found = true; + break; + } + } + rcu_read_unlock(); + + return found; } static struct nvmet_fc_tgt_assoc * -nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport) +nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { - struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; + struct nvmet_fc_tgt_assoc *assoc; unsigned long flags; + bool done; u64 ran; int idx; - bool needrandom = true; + + if (!tgtport->pe) + return NULL; assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); if (!assoc) return NULL; - idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); + idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); if (idx < 0) goto out_free_assoc; - if (!nvmet_fc_tgtport_get(tgtport)) - goto out_ida_put; + assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); + if (IS_ERR(assoc->hostport)) + goto out_ida; assoc->tgtport = tgtport; + nvmet_fc_tgtport_get(tgtport); assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); - INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); + INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); + atomic_set(&assoc->terminating, 0); - while (needrandom) { + done = false; + do { get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); ran = ran << BYTES_FOR_QID_SHIFT; spin_lock_irqsave(&tgtport->lock, flags); - needrandom = false; - list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) - if (ran == tmpassoc->association_id) { - needrandom = true; - break; - } - if (!needrandom) { + if (!nvmet_fc_assoc_exists(tgtport, ran)) { assoc->association_id = ran; - list_add_tail(&assoc->a_list, &tgtport->assoc_list); + list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); + done = 
true; } spin_unlock_irqrestore(&tgtport->lock, flags); - } + } while (!done); return assoc; -out_ida_put: - ida_simple_remove(&tgtport->assoc_cnt, idx); +out_ida: + ida_free(&tgtport->assoc_cnt, idx); out_free_assoc: kfree(assoc); return NULL; @@ -861,14 +1165,29 @@ nvmet_fc_target_assoc_free(struct kref *ref) struct nvmet_fc_tgt_assoc *assoc = container_of(ref, struct nvmet_fc_tgt_assoc, ref); struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + struct nvmet_fc_ls_iod *oldls; unsigned long flags; + int i; + for (i = NVMET_NR_QUEUES; i >= 0; i--) { + if (assoc->queues[i]) + nvmet_fc_delete_target_queue(assoc->queues[i]); + } + + /* Send Disconnect now that all i/o has completed */ + nvmet_fc_xmt_disconnect_assoc(assoc); + + nvmet_fc_hostport_put(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); - list_del(&assoc->a_list); + oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); - ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); + /* if pending Rcv Disconnect Association LS, send rsp now */ + if (oldls) + nvmet_fc_xmt_ls_rsp(tgtport, oldls); + ida_free(&tgtport->assoc_cnt, assoc->a_id); + pr_info("{%d:%d}: Association freed\n", + tgtport->fc_target_port.port_num, assoc->a_id); kfree(assoc); - nvmet_fc_tgtport_put(tgtport); } static void @@ -887,25 +1206,25 @@ static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - struct nvmet_fc_tgt_queue *queue; unsigned long flags; int i; spin_lock_irqsave(&tgtport->lock, flags); + list_del_rcu(&assoc->a_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + + synchronize_rcu(); + + /* ensure all in-flight I/Os have been processed */ for (i = NVMET_NR_QUEUES; i >= 0; i--) { - queue = assoc->queues[i]; - if (queue) { - if (!nvmet_fc_tgt_q_get(queue)) - continue; - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_queue(queue); - nvmet_fc_tgt_q_put(queue); - spin_lock_irqsave(&tgtport->lock, flags); - } + if (assoc->queues[i]) + flush_workqueue(assoc->queues[i]->work_q); } - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_tgt_a_put(assoc); + pr_info("{%d:%d}: Association deleted\n", + tgtport->fc_target_port.port_num, assoc->a_id); + + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fc_tgt_assoc * @@ -914,17 +1233,17 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, { struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_assoc *ret = NULL; - unsigned long flags; - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { ret = assoc; - nvmet_fc_tgt_a_get(assoc); + if (!nvmet_fc_tgt_a_get(assoc)) + ret = NULL; break; } } - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); return ret; } @@ -936,6 +1255,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, { lockdep_assert_held(&nvmet_fc_tgtlock); + nvmet_fc_tgtport_get(tgtport); pe->tgtport = tgtport; tgtport->pe = pe; @@ -955,8 +1275,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - if (pe->tgtport) + if (pe->tgtport) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport->pe = NULL; + } list_del(&pe->pe_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -974,8 +1296,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) 
spin_lock_irqsave(&nvmet_fc_tgtlock, flags); pe = tgtport->pe; - if (pe) + if (pe) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport = NULL; + } tgtport->pe = NULL; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -998,6 +1322,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name && tgtport->fc_target_port.port_name == pe->port_name) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + WARN_ON(pe->tgtport); tgtport->pe = pe; pe->tgtport = tgtport; @@ -1008,9 +1335,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) } /** - * nvme_fc_register_targetport - transport entry point called by an + * nvmet_fc_register_targetport - transport entry point called by an * LLDD to register the existence of a local - * NVME subystem FC port. + * NVME subsystem FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be @@ -1050,7 +1377,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, goto out_regtgt_failed; } - idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL); + idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; goto out_fail_kfree; @@ -1063,16 +1390,21 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, newrec->fc_target_port.node_name = pinfo->node_name; newrec->fc_target_port.port_name = pinfo->port_name; - newrec->fc_target_port.private = &newrec[1]; + if (template->target_priv_sz) + newrec->fc_target_port.private = &newrec[1]; + else + newrec->fc_target_port.private = NULL; newrec->fc_target_port.port_id = pinfo->port_id; newrec->fc_target_port.port_num = idx; INIT_LIST_HEAD(&newrec->tgt_list); newrec->dev = dev; newrec->ops = template; spin_lock_init(&newrec->lock); - INIT_LIST_HEAD(&newrec->ls_list); + INIT_LIST_HEAD(&newrec->ls_rcv_list); + INIT_LIST_HEAD(&newrec->ls_req_list); INIT_LIST_HEAD(&newrec->ls_busylist); INIT_LIST_HEAD(&newrec->assoc_list); + INIT_LIST_HEAD(&newrec->host_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; @@ -1095,7 +1427,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, out_free_newrec: put_device(dev); out_ida_put: - ida_simple_remove(&nvmet_fc_tgtport_cnt, idx); + ida_free(&nvmet_fc_tgtport_cnt, idx); out_fail_kfree: kfree(newrec); out_regtgt_failed: @@ -1111,18 +1443,13 @@ nvmet_fc_free_tgtport(struct kref *ref) struct nvmet_fc_tgtport *tgtport = container_of(ref, struct nvmet_fc_tgtport, ref); struct device *dev = tgtport->dev; - unsigned long flags; - - spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - list_del(&tgtport->tgt_list); - spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_free_ls_iodlist(tgtport); /* let the LLDD know we've finished tearing it down */ tgtport->ops->targetport_delete(&tgtport->fc_target_port); - ida_simple_remove(&nvmet_fc_tgtport_cnt, + ida_free(&nvmet_fc_tgtport_cnt, tgtport->fc_target_port.port_num); ida_destroy(&tgtport->assoc_cnt); @@ -1147,21 +1474,75 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) static void __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) { + struct nvmet_fc_tgt_assoc *assoc; + + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { + if (!nvmet_fc_tgt_a_get(assoc)) + continue; + 
nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); + } + rcu_read_unlock(); +} + +/** + * nvmet_fc_invalidate_host - transport entry point called by an LLDD + * to remove references to a hosthandle for LS's. + * + * The nvmet-fc layer ensures that any references to the hosthandle + * on the targetport are forgotten (set to NULL). The LLDD will + * typically call this when a login with a remote host port has been + * lost, thus LS's for the remote host port are no longer possible. + * + * If an LS request is outstanding to the targetport/hosthandle (or + * issued concurrently with the call to invalidate the host), the + * LLDD is responsible for terminating/aborting the LS and completing + * the LS request. It is recommended that these terminations/aborts + * occur after calling to invalidate the host handle to avoid additional + * retries by the nvmet-fc transport. The nvmet-fc transport may + * continue to reference host handle while it cleans up outstanding + * NVME associations. The nvmet-fc transport will call the + * ops->host_release() callback to notify the LLDD that all references + * are complete and the related host handle can be recovered. + * Note: if there are no references, the callback may be called before + * the invalidate host call returns. + * + * @target_port: pointer to the (registered) target port that a prior + * LS was received on and which supplied the transport the + * hosthandle. + * @hosthandle: the handle (pointer) that represents the host port + * that no longer has connectivity and that LS's should + * no longer be directed to. + */ +void +nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, + void *hosthandle) +{ + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_tgt_assoc *assoc, *next; unsigned long flags; + bool noassoc = true; spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry_safe(assoc, next, &tgtport->assoc_list, a_list) { + if (assoc->hostport->hosthandle != hosthandle) + continue; if (!nvmet_fc_tgt_a_get(assoc)) continue; - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_assoc(assoc); + assoc->hostport->invalid = 1; + noassoc = false; + nvmet_fc_schedule_delete_assoc(assoc); nvmet_fc_tgt_a_put(assoc); - spin_lock_irqsave(&tgtport->lock, flags); } spin_unlock_irqrestore(&tgtport->lock, flags); + + /* if there's nothing to wait for - call the callback */ + if (noassoc && tgtport->ops->host_release) + tgtport->ops->host_release(hosthandle); } +EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); /* * nvmet layer has called to terminate an association @@ -1183,8 +1564,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) continue; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); - spin_lock_irqsave(&tgtport->lock, flags); - list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { + rcu_read_lock(); + list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { queue = assoc->queues[0]; if (queue && queue->nvme_sq.ctrl == ctrl) { if (nvmet_fc_tgt_a_get(assoc)) @@ -1192,12 +1573,13 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) break; } } - spin_unlock_irqrestore(&tgtport->lock, flags); + rcu_read_unlock(); nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - schedule_work(&assoc->del_work); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); return; } @@ -1206,8 +1588,41 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } +static void +nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport 
*tgtport) +{ + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct nvmet_fc_ls_iod *iod; + int i; + + iod = tgtport->iod; + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) + cancel_work(&iod->work); + + /* + * After this point the connection is lost and thus any pending + * request can't be processed by the normal completion path. This + * is likely a request from nvmet_fc_send_ls_req_async. + */ + while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, + struct nvmet_fc_ls_req_op, lsreq_list))) { + list_del(&lsop->lsreq_list); + + if (!lsop->req_queued) + continue; + + lsreq = &lsop->ls_req; + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + nvmet_fc_tgtport_put(tgtport); + kfree(lsop); + } +} + /** - * nvme_fc_unregister_targetport - transport entry point called by an + * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously * registered a local NVME subsystem FC port. * @target_port: pointer to the (registered) target port that is to be @@ -1221,12 +1636,20 @@ int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_del(&tgtport->tgt_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind_tgt(tgtport); /* terminate any outstanding associations */ __nvmet_fc_free_assocs(tgtport); + flush_workqueue(nvmet_wq); + + nvmet_fc_free_pending_reqs(tgtport); nvmet_fc_tgtport_put(tgtport); return 0; @@ -1234,113 +1657,15 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); -/* *********************** FC-NVME LS Handling **************************** */ - - -static void -nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) -{ - struct fcnvme_ls_acc_hdr *acc = buf; - - acc->w0.ls_cmd = ls_cmd; - acc->desc_list_len = desc_len; - acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); - acc->rqst.desc_len = - fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); - acc->rqst.w0.ls_cmd = rqst_ls_cmd; -} - -static int -nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, - u8 reason, u8 explanation, u8 vendor) -{ - struct fcnvme_ls_rjt *rjt = buf; - - nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, - fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), - ls_cmd); - rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); - rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); - rjt->rjt.reason_code = reason; - rjt->rjt.reason_explanation = explanation; - rjt->rjt.vendor = vendor; - - return sizeof(struct fcnvme_ls_rjt); -} - -/* Validation Error indexes into the string table below */ -enum { - VERR_NO_ERROR = 0, - VERR_CR_ASSOC_LEN = 1, - VERR_CR_ASSOC_RQST_LEN = 2, - VERR_CR_ASSOC_CMD = 3, - VERR_CR_ASSOC_CMD_LEN = 4, - VERR_ERSP_RATIO = 5, - VERR_ASSOC_ALLOC_FAIL = 6, - VERR_QUEUE_ALLOC_FAIL = 7, - VERR_CR_CONN_LEN = 8, - VERR_CR_CONN_RQST_LEN = 9, - VERR_ASSOC_ID = 10, - VERR_ASSOC_ID_LEN = 11, - VERR_NO_ASSOC = 12, - VERR_CONN_ID = 13, - VERR_CONN_ID_LEN = 14, - VERR_NO_CONN = 15, - VERR_CR_CONN_CMD = 16, - VERR_CR_CONN_CMD_LEN = 17, - VERR_DISCONN_LEN = 18, - VERR_DISCONN_RQST_LEN = 19, - VERR_DISCONN_CMD = 20, - VERR_DISCONN_CMD_LEN = 21, - VERR_DISCONN_SCOPE = 22, - VERR_RS_LEN = 23, - VERR_RS_RQST_LEN = 24, - VERR_RS_CMD = 25, - VERR_RS_CMD_LEN = 26, - 
VERR_RS_RCTL = 27, - VERR_RS_RO = 28, -}; +/* ********************** FC-NVME LS RCV Handling ************************* */ -static char *validation_errors[] = { - "OK", - "Bad CR_ASSOC Length", - "Bad CR_ASSOC Rqst Length", - "Not CR_ASSOC Cmd", - "Bad CR_ASSOC Cmd Length", - "Bad Ersp Ratio", - "Association Allocation Failed", - "Queue Allocation Failed", - "Bad CR_CONN Length", - "Bad CR_CONN Rqst Length", - "Not Association ID", - "Bad Association ID Length", - "No Association", - "Not Connection ID", - "Bad Connection ID Length", - "No Connection", - "Not CR_CONN Cmd", - "Bad CR_CONN Cmd Length", - "Bad DISCONN Length", - "Bad DISCONN Rqst Length", - "Not DISCONN Cmd", - "Bad DISCONN Cmd Length", - "Bad Disconnect Scope", - "Bad RS Length", - "Bad RS Rqst Length", - "Not RS Cmd", - "Bad RS Cmd Length", - "Bad RS R_CTL", - "Bad RS Relative Offset", -}; static void nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_cr_assoc_rqst *rqst = - (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf; - struct fcnvme_ls_cr_assoc_acc *acc = - (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf; + struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; + struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; struct nvmet_fc_tgt_queue *queue; int ret = 0; @@ -1372,23 +1697,26 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, else { /* new association w/ admin queue */ - iod->assoc = nvmet_fc_alloc_target_assoc(tgtport); + iod->assoc = nvmet_fc_alloc_target_assoc( + tgtport, iod->hosthandle); if (!iod->assoc) ret = VERR_ASSOC_ALLOC_FAIL; else { queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, be16_to_cpu(rqst->assoc_cmd.sqsize)); - if (!queue) + if (!queue) { ret = VERR_QUEUE_ALLOC_FAIL; + nvmet_fc_tgt_a_put(iod->assoc); + } } } if (ret) { - dev_err(tgtport->dev, - "Create Association LS failed: %s\n", - validation_errors[ret]); - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + pr_err("{%d}: Create Association LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return; @@ -1398,11 +1726,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, atomic_set(&queue->connected, 1); queue->sqhd = 0; /* best place to init value */ + pr_info("{%d:%d}: Association created\n", + tgtport->fc_target_port.port_num, iod->assoc->a_id); + /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_cr_assoc_acc)), FCNVME_LS_CREATE_ASSOCIATION); @@ -1423,10 +1754,8 @@ static void nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_cr_conn_rqst *rqst = - (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf; - struct fcnvme_ls_cr_conn_acc *acc = - (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf; + struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; + struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; struct nvmet_fc_tgt_queue *queue; int ret = 0; @@ -1475,11 +1804,11 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, } if (ret) { - dev_err(tgtport->dev, - "Create Connection LS failed: %s\n", - validation_errors[ret]); - iod->lsreq->rsplen = 
nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + pr_err("{%d}: Create Connection LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : FCNVME_RJT_RC_LOGIC, @@ -1493,9 +1822,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), FCNVME_LS_CREATE_CONNECTION); acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); @@ -1507,107 +1836,89 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, be16_to_cpu(rqst->connect_cmd.qid))); } -static void +/* + * Returns true if the LS response is to be transmit + * Returns false if the LS response is to be delayed + */ +static int nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_disconnect_rqst *rqst = - (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; - struct fcnvme_ls_disconnect_acc *acc = - (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; - struct nvmet_fc_tgt_queue *queue = NULL; - struct nvmet_fc_tgt_assoc *assoc; + struct fcnvme_ls_disconnect_assoc_rqst *rqst = + &iod->rqstbuf->rq_dis_assoc; + struct fcnvme_ls_disconnect_assoc_acc *acc = + &iod->rspbuf->rsp_dis_assoc; + struct nvmet_fc_tgt_assoc *assoc = NULL; + struct nvmet_fc_ls_iod *oldls = NULL; + unsigned long flags; int ret = 0; - bool del_assoc = false; memset(acc, 0, sizeof(*acc)); - if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst)) - ret = VERR_DISCONN_LEN; - else if (rqst->desc_list_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_rqst))) - ret = VERR_DISCONN_RQST_LEN; - else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) - ret = VERR_ASSOC_ID; - else if (rqst->associd.desc_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_lsdesc_assoc_id))) - ret = VERR_ASSOC_ID_LEN; - else if (rqst->discon_cmd.desc_tag != - cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) - ret = VERR_DISCONN_CMD; - else if (rqst->discon_cmd.desc_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_lsdesc_disconn_cmd))) - ret = VERR_DISCONN_CMD_LEN; - else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) && - (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION)) - ret = VERR_DISCONN_SCOPE; - else { - /* match an active association */ + ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); + if (!ret) { + /* match an active association - takes an assoc ref if !NULL */ assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); iod->assoc = assoc; - if (assoc) { - if (rqst->discon_cmd.scope == - FCNVME_DISCONN_CONNECTION) { - queue = nvmet_fc_find_target_queue(tgtport, - be64_to_cpu( - rqst->discon_cmd.id)); - if (!queue) { - nvmet_fc_tgt_a_put(assoc); - ret = VERR_NO_CONN; - } - } - } else + if (!assoc) ret = VERR_NO_ASSOC; } - if (ret) { - dev_err(tgtport->dev, - "Disconnect LS failed: %s\n", - validation_errors[ret]); - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + if (ret || !assoc) { + pr_err("{%d}: Disconnect LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, (ret 
== VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : - (ret == VERR_NO_CONN) ? - FCNVME_RJT_RC_INV_CONN : - FCNVME_RJT_RC_LOGIC, + FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); - return; + return true; } /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_acc)), - FCNVME_LS_DISCONNECT); - - - /* are we to delete a Connection ID (queue) */ - if (queue) { - int qid = queue->qid; + sizeof(struct fcnvme_ls_disconnect_assoc_acc)), + FCNVME_LS_DISCONNECT_ASSOC); - nvmet_fc_delete_target_queue(queue); - - /* release the get taken by find_target_queue */ - nvmet_fc_tgt_q_put(queue); + /* + * The rules for LS response says the response cannot + * go back until ABTS's have been sent for all outstanding + * I/O and a Disconnect Association LS has been sent. + * So... save off the Disconnect LS to send the response + * later. If there was a prior LS already saved, replace + * it with the newer one and send a can't perform reject + * on the older one. + */ + spin_lock_irqsave(&tgtport->lock, flags); + oldls = assoc->rcv_disconn; + assoc->rcv_disconn = iod; + spin_unlock_irqrestore(&tgtport->lock, flags); - /* tear association down if io queue terminated */ - if (!qid) - del_assoc = true; + if (oldls) { + pr_info("{%d:%d}: Multiple Disconnect Association LS's " + "received\n", + tgtport->fc_target_port.port_num, assoc->a_id); + /* overwrite good response with bogus failure */ + oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, + sizeof(*iod->rspbuf), + /* ok to use rqst, LS is same */ + rqst->w0.ls_cmd, + FCNVME_RJT_RC_UNAB, + FCNVME_RJT_EXP_NONE, 0); + nvmet_fc_xmt_ls_rsp(tgtport, oldls); } - /* release get taken in nvmet_fc_find_target_assoc */ - nvmet_fc_tgt_a_put(iod->assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); - if (del_assoc) - nvmet_fc_delete_target_assoc(iod->assoc); + return false; } @@ -1619,13 +1930,13 @@ static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; static void -nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq) +nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) { - struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private; + struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; struct nvmet_fc_tgtport *tgtport = iod->tgtport; fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); nvmet_fc_free_ls_iod(tgtport, iod); nvmet_fc_tgtport_put(tgtport); } @@ -1637,11 +1948,11 @@ nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, int ret; fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); - ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq); + ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); if (ret) - nvmet_fc_xmt_ls_rsp_done(iod->lsreq); + nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); } /* @@ -1651,15 +1962,15 @@ static void nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_rqst_w0 *w0 = - (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf; + struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; + bool sendrsp = true; - iod->lsreq->nvmet_fc_private = iod; - iod->lsreq->rspbuf = iod->rspbuf; - iod->lsreq->rspdma = iod->rspdma; 
- iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done; + iod->lsrsp->nvme_fc_private = iod; + iod->lsrsp->rspbuf = iod->rspbuf; + iod->lsrsp->rspdma = iod->rspdma; + iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; /* Be preventative. handlers will later set to valid length */ - iod->lsreq->rsplen = 0; + iod->lsrsp->rsplen = 0; iod->assoc = NULL; @@ -1677,17 +1988,18 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, /* Creates an IO Queue/Connection */ nvmet_fc_ls_create_connection(tgtport, iod); break; - case FCNVME_LS_DISCONNECT: + case FCNVME_LS_DISCONNECT_ASSOC: /* Terminate a Queue/Connection or the Association */ - nvmet_fc_ls_disconnect(tgtport, iod); + sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); break; default: - iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf, - NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd, + iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, + sizeof(*iod->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); } - nvmet_fc_xmt_ls_rsp(tgtport, iod); + if (sendrsp) + nvmet_fc_xmt_ls_rsp(tgtport, iod); } /* @@ -1716,37 +2028,56 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work) * * @target_port: pointer to the (registered) target port the LS was * received on. - * @lsreq: pointer to a lsreq request structure to be used to reference + * @hosthandle: pointer to the host specific data, gets stored in iod. + * @lsrsp: pointer to a lsrsp structure to be used to reference * the exchange corresponding to the LS. * @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request */ int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, - struct nvmefc_tgt_ls_req *lsreq, + void *hosthandle, + struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_ls_iod *iod; - - if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE) + struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; + + if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { + pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n", + tgtport->fc_target_port.port_num, + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : "", + lsreqbuf_len); return -E2BIG; + } - if (!nvmet_fc_tgtport_get(tgtport)) + if (!nvmet_fc_tgtport_get(tgtport)) { + pr_info("{%d}: RCV %s LS failed: target deleting\n", + tgtport->fc_target_port.port_num, + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); return -ESHUTDOWN; + } iod = nvmet_fc_alloc_ls_iod(tgtport); if (!iod) { + pr_info("{%d}: RCV %s LS failed: context allocation failed\n", + tgtport->fc_target_port.port_num, + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
+ nvmefc_ls_names[w0->ls_cmd] : ""); nvmet_fc_tgtport_put(tgtport); return -ENOENT; } - iod->lsreq = lsreq; + iod->lsrsp = lsrsp; iod->fcpreq = NULL; memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); iod->rqstdatalen = lsreqbuf_len; + iod->hosthandle = hosthandle; - schedule_work(&iod->work); + queue_work(nvmet_wq, &iod->work); return 0; } @@ -1851,7 +2182,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, */ rspcnt = atomic_inc_return(&fod->queue->zrspcnt); if (!(rspcnt % fod->queue->ersp_ratio) || - sqe->opcode == nvme_fabrics_command || + nvme_is_fabrics((struct nvme_command *) sqe) || xfr_length != fod->req.transfer_len || (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || @@ -1977,7 +2308,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) { /* - * should be ok to set w/o lock as its in the thread of + * should be ok to set w/o lock as it's in the thread of * execution (not an async timer routine) and doesn't * contend with any clearing action */ @@ -2039,9 +2370,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { - spin_lock(&fod->flock); + spin_lock_irqsave(&fod->flock, flags); fod->abort = true; - spin_unlock(&fod->flock); + spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; @@ -2060,7 +2391,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) } /* data transfer complete, resume with nvmet layer */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); break; case NVMET_FCOP_READDATA: @@ -2197,13 +2528,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, int ret; /* - * if there is no nvmet mapping to the targetport there - * shouldn't be requests. just terminate them. - */ - if (!tgtport->pe) - goto transport_error; - - /* * Fused commands are currently not supported in the linux * implementation. * @@ -2229,7 +2553,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, } fod->req.cmd = &fod->cmdiubuf.sqe; - fod->req.rsp = &fod->rspiubuf.cqe; + fod->req.cqe = &fod->rspiubuf.cqe; + if (!tgtport->pe) + goto transport_error; fod->req.port = tgtport->pe->port; /* clear any response payload */ @@ -2238,10 +2564,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->data_sg = NULL; fod->data_sg_cnt = 0; - ret = nvmet_req_init(&fod->req, - &fod->queue->nvme_cq, - &fod->queue->nvme_sq, - &nvmet_fc_tgt_fcp_ops); + ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, + &nvmet_fc_tgt_fcp_ops); if (!ret) { /* bad SQE content or invalid ctrl state */ /* nvmet layer has already called op done to send rsp. */ @@ -2276,7 +2600,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, * can invoke the nvmet_layer now. If read data, cmd completion will * push the data */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); return; transport_error: @@ -2300,7 +2624,7 @@ transport_error: * and the api of the FC LLDD which may issue a hw command to send the * response, but the LLDD may not get the hw completion for that command * and upcall the nvmet_fc layer before a new command may be - * asynchronously received - its possible for a command to be received + * asynchronously received - it's possible for a command to be received * before the LLDD and nvmet_fc have recycled the job structure. 
It gives * the appearance of more commands received than fits in the sq. * To alleviate this scenario, a temporary queue is maintained in the @@ -2344,7 +2668,7 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, /* validate iu, so the connection id can be used to find the queue */ if ((cmdiubuf_len != sizeof(*cmdiu)) || - (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || + (cmdiu->format_id != NVME_CMD_FORMAT_ID) || (cmdiu->fc_id != NVME_CMD_FC_ID) || (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) return -EIO; @@ -2567,12 +2891,17 @@ nvmet_fc_add_port(struct nvmet_port *port) list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + /* a FC port can only be 1 nvmet port id */ if (!tgtport->pe) { nvmet_fc_portentry_bind(tgtport, pe, port); ret = 0; } else ret = -EALREADY; + + nvmet_fc_tgtport_put(tgtport); break; } } @@ -2588,12 +2917,78 @@ static void nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind(pe); + if (tgtport) { + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(tgtport); + nvmet_fc_tgtport_put(tgtport); + } + kfree(pe); } +static void +nvmet_fc_discovery_chg(struct nvmet_port *port) +{ + struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (!tgtport) + return; + + if (tgtport && tgtport->ops->discovery_event) + tgtport->ops->discovery_event(&tgtport->fc_target_port); + + nvmet_fc_tgtport_put(tgtport); +} + +static ssize_t +nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_size) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_fc_tgt_queue *queue = + container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); + struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; + struct nvmet_fc_hostport *hostport = queue->assoc ? 
queue->assoc->hostport : NULL; + u64 wwnn, wwpn; + ssize_t ret = 0; + + if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) + return -ENODEV; + if (!hostport || !nvmet_fc_hostport_get(hostport)) { + ret = -ENODEV; + goto out_put; + } + + if (tgtport->ops->host_traddr) { + ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); + if (ret) + goto out_put_host; + ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); + } +out_put_host: + nvmet_fc_hostport_put(hostport); +out_put: + nvmet_fc_tgtport_put(tgtport); + return ret; +} + static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_FC, @@ -2602,6 +2997,8 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .remove_port = nvmet_fc_remove_port, .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, + .discovery_chg = nvmet_fc_discovery_chg, + .host_traddr = nvmet_fc_host_traddr, }; static int __init nvmet_fc_init_module(void) @@ -2611,6 +3008,9 @@ static int __init nvmet_fc_init_module(void) static void __exit nvmet_fc_exit_module(void) { + /* ensure any shutdown operation, e.g. delete ctrls have finished */ + flush_workqueue(nvmet_wq); + /* sanity check - all lports should be removed */ if (!list_empty(&nvmet_fc_target_list)) pr_warn("%s: targetport list not empty\n", __func__); @@ -2623,4 +3023,5 @@ static void __exit nvmet_fc_exit_module(void) module_init(nvmet_fc_init_module); module_exit(nvmet_fc_exit_module); +MODULE_DESCRIPTION("NVMe target FC transport driver"); MODULE_LICENSE("GPL v2"); |
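
Note on the queue allocation change: the diff replaces open-coded size arithmetic and a (struct nvmet_fc_fcp_iod *)&queue[1] cast with a flexible array member sized via struct_size() (the /* __counted_by(sqsize) */ comment marks the intended bounds-checking annotation for that member). Below is a minimal userspace sketch of the same idiom; the type and field names are illustrative stand-ins, and queue_size() open-codes the kernel's struct_size() helper without its overflow saturation.

#include <stdio.h>
#include <stdlib.h>

struct fcp_iod {                /* stand-in for struct nvmet_fc_fcp_iod */
	int active;
};

struct tgt_queue {              /* stand-in for struct nvmet_fc_tgt_queue */
	unsigned short qid;
	unsigned short sqsize;
	struct fcp_iod fod[];   /* flexible array member: one iod per SQ slot */
};

/* userspace stand-in for the kernel's struct_size(queue, fod, n) */
static size_t queue_size(size_t n)
{
	return sizeof(struct tgt_queue) + n * sizeof(struct fcp_iod);
}

int main(void)
{
	unsigned short sqsize = 32;
	struct tgt_queue *q = calloc(1, queue_size(sqsize));

	if (!q)
		return 1;
	q->sqsize = sqsize;
	q->fod[sqsize - 1].active = 1;  /* addressable with no separate fod pointer */
	printf("queue with %hu iods occupies %zu bytes\n", q->sqsize,
	       queue_size(sqsize));
	free(q);
	return 0;
}

In the kernel, the real struct_size() additionally saturates to SIZE_MAX when the multiply or add would overflow, so kzalloc() fails cleanly rather than returning a too-small object; that is the main advantage over the hand-written expression the diff removes.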

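Note on the hostport lookup: the nvmet_fc_alloc_hostport() hunk follows a match-or-allocate shape — probe the host_list under tgtport->lock, drop the lock to allocate (the lock is a spinlock and a GFP_KERNEL allocation may sleep), then re-probe under the lock and discard the fresh allocation if a concurrent caller inserted one first. The following is a hedged userspace sketch of that shape, with a pthread mutex standing in for the spinlock and a plain counter for the kref; all names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdlib.h>

struct hostport {
	void *hosthandle;
	int refcnt;                     /* stand-in for the kref in the diff */
	struct hostport *next;
};

static struct hostport *host_list;
static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold host_lock, mirroring the lockdep_assert_held() check */
static struct hostport *match_hostport(void *hosthandle)
{
	struct hostport *h;

	for (h = host_list; h; h = h->next) {
		if (h->hosthandle == hosthandle) {
			h->refcnt++;    /* analogous to kref_get_unless_zero() */
			return h;
		}
	}
	return NULL;
}

struct hostport *alloc_hostport(void *hosthandle)
{
	struct hostport *newhost, *match;

	pthread_mutex_lock(&host_lock);
	match = match_hostport(hosthandle);
	pthread_mutex_unlock(&host_lock);
	if (match)
		return match;

	newhost = calloc(1, sizeof(*newhost));  /* may "sleep": lock not held */
	if (!newhost)
		return NULL;

	pthread_mutex_lock(&host_lock);
	match = match_hostport(hosthandle);
	if (match) {
		free(newhost);          /* lost the race; reuse the winner */
		newhost = match;
	} else {
		newhost->hosthandle = hosthandle;
		newhost->refcnt = 1;
		newhost->next = host_list;
		host_list = newhost;
	}
	pthread_mutex_unlock(&host_lock);

	return newhost;
}

int main(void)
{
	int handle;
	struct hostport *h1 = alloc_hostport(&handle);
	struct hostport *h2 = alloc_hostport(&handle);

	/* the second call matches the first entry and bumps its refcount */
	return !(h1 && h1 == h2 && h1->refcnt == 2);
}

The second lookup is what makes the unlocked allocation safe: whichever caller inserts first wins, the loser's allocation is simply freed, and the list itself is only ever modified while the lock is held.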