author    Logan Gunthorpe <logang@deltatee.com>    2018-10-04 15:27:46 -0600
committer Bjorn Helgaas <bhelgaas@google.com>      2018-10-17 12:18:23 -0500
commit    5b2322e48c978fd91d50873491b1c3b0a3b0266b (patch)
tree      fcb31f70487da3f25d6584aba8f4b1f3c2b9893a /drivers/nvme/target/rdma.c
parent    e0596ab2900dfa64c0538e4aef8eec3c6f0f38eb (diff)
nvmet: Introduce helper functions to allocate and free request SGLs
Add helpers to allocate and free the SGL in a struct nvmet_req:

    int nvmet_req_alloc_sgl(struct nvmet_req *req)
    void nvmet_req_free_sgl(struct nvmet_req *req)

This will be expanded in a future patch to implement peer-to-peer memory
DMAs and should be common with all target drivers. The new helpers are
used in nvmet-rdma.

Seeing we use req.transfer_len as the length of the SGL, it is set
earlier and cleared on any error. It also seems to be unnecessary to
accumulate the length, as the map_sgl functions should only ever be
called once per request.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sagi Grimberg <sagi@grimberg.me>
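The helpers themselves live in the target core (drivers/nvme/target/core.c),
which is outside this diffstat. Below is a minimal sketch of what they could
look like, inferred from the commit message and the rdma.c call sites in the
diff; it wraps the existing sgl_alloc()/sgl_free() helpers from
lib/scatterlist.c, and the actual core-side patch may differ in detail:

    /*
     * Sketch only -- reconstructed from the commit message and the
     * rdma.c call sites below, not copied from the core-side patch.
     */
    int nvmet_req_alloc_sgl(struct nvmet_req *req)
    {
    	/* the transport must set req->transfer_len before calling this */
    	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
    	if (!req->sg)
    		return -ENOMEM;

    	return 0;
    }

    void nvmet_req_free_sgl(struct nvmet_req *req)
    {
    	sgl_free(req->sg);
    	req->sg = NULL;
    	req->sg_cnt = 0;
    }

Centralizing allocation behind a single entry point is what lets the future
peer-to-peer patch substitute P2P memory for all transports without touching
each driver's mapping code.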
Diffstat (limited to 'drivers/nvme/target/rdma.c')
-rw-r--r--  drivers/nvme/target/rdma.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bfc4da660bb4..9e091e78a2f0 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -503,7 +503,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != rsp->cmd->inline_sg)
-		sgl_free(rsp->req.sg);
+		nvmet_req_free_sgl(&rsp->req);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -652,24 +652,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 {
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u64 addr = le64_to_cpu(sgl->addr);
-	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
 
+	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
 	/* no data command? */
-	if (!len)
+	if (!rsp->req.transfer_len)
 		return 0;
 
-	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
-	if (!rsp->req.sg)
-		return NVME_SC_INTERNAL;
+	ret = nvmet_req_alloc_sgl(&rsp->req);
+	if (ret < 0)
+		goto error_out;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 			nvmet_data_dir(&rsp->req));
 	if (ret < 0)
-		return NVME_SC_INTERNAL;
-	rsp->req.transfer_len += len;
+		goto error_out;
 	rsp->n_rdma += ret;
 
 	if (invalidate) {
@@ -678,6 +678,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	}
 
 	return 0;
+
+error_out:
+	rsp->req.transfer_len = 0;
+	return NVME_SC_INTERNAL;
 }
 
 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)