Diffstat (limited to 'fs/cifs/smbdirect.c')
-rw-r--r-- | fs/cifs/smbdirect.c | 65
1 files changed, 34 insertions, 31 deletions
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 91710eb571fb..c62f7c95683c 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -862,6 +862,8 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
 		request->sge[0].length, DMA_TO_DEVICE);
 
+	smbd_disconnect_rdma_connection(info);
+
 dma_mapping_failed:
 	mempool_free(request, info->request_mempool);
 	return rc;
@@ -1025,8 +1027,8 @@ static int smbd_post_send(struct smbd_connection *info,
 
 	for (i = 0; i < request->num_sge; i++) {
 		log_rdma_send(INFO,
-			"rdma_request sge[%d] addr=%llu legnth=%u\n",
-			i, request->sge[0].addr, request->sge[0].length);
+			"rdma_request sge[%d] addr=%llu length=%u\n",
+			i, request->sge[i].addr, request->sge[i].length);
 		ib_dma_sync_single_for_device(
 			info->id->device,
 			request->sge[i].addr,
@@ -1061,6 +1063,7 @@ static int smbd_post_send(struct smbd_connection *info,
 			if (atomic_dec_and_test(&info->send_pending))
 				wake_up(&info->wait_send_pending);
 		}
+		smbd_disconnect_rdma_connection(info);
 	} else
 		/* Reset timer for idle connection after packet is sent */
 		mod_delayed_work(info->workqueue, &info->idle_timer_work,
@@ -1202,7 +1205,7 @@ static int smbd_post_recv(
 	if (rc) {
 		ib_dma_unmap_single(info->id->device, response->sge.addr,
 			response->sge.length, DMA_FROM_DEVICE);
-
+		smbd_disconnect_rdma_connection(info);
 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
 	}
 
@@ -1498,8 +1501,8 @@ int smbd_reconnect(struct TCP_Server_Info *server)
 	log_rdma_event(INFO, "reconnecting rdma session\n");
 
 	if (!server->smbd_conn) {
-		log_rdma_event(ERR, "rdma session already destroyed\n");
-		return -EINVAL;
+		log_rdma_event(INFO, "rdma session already destroyed\n");
+		goto create_conn;
 	}
 
 	/*
@@ -1512,15 +1515,19 @@ int smbd_reconnect(struct TCP_Server_Info *server)
 	}
 
 	/* wait until the transport is destroyed */
-	wait_event(server->smbd_conn->wait_destroy,
-		server->smbd_conn->transport_status == SMBD_DESTROYED);
+	if (!wait_event_timeout(server->smbd_conn->wait_destroy,
+		server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
+		return -EAGAIN;
 
 	destroy_workqueue(server->smbd_conn->workqueue);
 	kfree(server->smbd_conn);
 
+create_conn:
 	log_rdma_event(INFO, "creating rdma session\n");
 	server->smbd_conn = smbd_get_connection(
 		server, (struct sockaddr *) &server->dstaddr);
+	log_rdma_event(INFO, "created rdma session info=%p\n",
+		server->smbd_conn);
 
 	return server->smbd_conn ? 0 : -ENOENT;
 }
@@ -2079,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
 	int start, i, j;
 	int max_iov_size =
 		info->max_send_size - sizeof(struct smbd_data_transfer);
-	struct kvec iov[SMBDIRECT_MAX_SGE];
+	struct kvec *iov;
 	int rc;
 
 	info->smbd_send_pending++;
@@ -2089,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
 	}
 
 	/*
-	 * This usually means a configuration error
-	 * We use RDMA read/write for packet size > rdma_readwrite_threshold
-	 * as long as it's properly configured we should never get into this
-	 * situation
-	 */
-	if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
-		log_write(ERR, "maximum send segment %x exceeding %x\n",
-			 rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
-		rc = -EINVAL;
-		goto done;
-	}
-
-	/*
-	 * Remove the RFC1002 length defined in MS-SMB2 section 2.1
-	 * It is used only for TCP transport
+	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
+	 * It is used only for TCP transport in the iov[0]
 	 * In future we may want to add a transport layer under protocol
 	 * layer so this will only be issued to TCP transport
 	 */
-	iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
-	iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
-	buflen += iov[0].iov_len;
+
+	if (rqst->rq_iov[0].iov_len != 4) {
+		log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
+		return -EINVAL;
+	}
+	iov = &rqst->rq_iov[1];
 
 	/* total up iov array first */
-	for (i = 1; i < rqst->rq_nvec; i++) {
-		iov[i].iov_base = rqst->rq_iov[i].iov_base;
-		iov[i].iov_len = rqst->rq_iov[i].iov_len;
+	for (i = 0; i < rqst->rq_nvec-1; i++) {
 		buflen += iov[i].iov_len;
 	}
 
@@ -2132,6 +2127,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
 		goto done;
 	}
 
+	cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
+	for (i = 0; i < rqst->rq_nvec-1; i++)
+		dump_smb(iov[i].iov_base, iov[i].iov_len);
+
 	remaining_data_length = buflen;
 
 	log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
@@ -2187,12 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
 					goto done;
 				}
 				i++;
+				if (i == rqst->rq_nvec-1)
+					break;
 			}
 			start = i;
 			buflen = 0;
 		} else {
 			i++;
-			if (i == rqst->rq_nvec) {
+			if (i == rqst->rq_nvec-1) {
 				/* send out all remaining vecs */
 				remaining_data_length -= buflen;
 				log_write(INFO,
@@ -2295,7 +2296,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
 			rc = ib_dereg_mr(smbdirect_mr->mr);
 			if (rc) {
 				log_rdma_mr(ERR,
-					"ib_dereg_mr faield rc=%x\n",
+					"ib_dereg_mr failed rc=%x\n",
 					rc);
 				smbd_disconnect_rdma_connection(info);
 			}
@@ -2542,6 +2543,8 @@ dma_map_error:
 	if (atomic_dec_and_test(&info->mr_used_count))
 		wake_up(&info->wait_for_mr_cleanup);
 
+	smbd_disconnect_rdma_connection(info);
+
 	return NULL;
 }
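
The common thread in these hunks is that a failed RDMA post now calls smbd_disconnect_rdma_connection(), so the transport is torn down and smbd_reconnect() can rebuild it. A minimal sketch of that error path, assuming the smbd_connection/smbd_request layout shown in the diff; the helper name smbd_post_one_send and its arguments are illustrative, not the kernel's actual code:

	/* Sketch only: post a send work request and disconnect on failure. */
	static int smbd_post_one_send(struct smbd_connection *info,
				      struct smbd_request *request,
				      struct ib_send_wr *send_wr)
	{
		struct ib_send_wr *bad_wr;
		int rc;

		rc = ib_post_send(info->id->qp, send_wr, &bad_wr);
		if (!rc)
			return 0;

		/* undo the DMA mapping set up for this request ... */
		ib_dma_unmap_single(info->id->device, request->sge[0].addr,
				    request->sge[0].length, DMA_TO_DEVICE);
		/* ... and force a disconnect so the upper layer reconnects */
		smbd_disconnect_rdma_connection(info);
		return rc;
	}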