author    Linus Torvalds <torvalds@linux-foundation.org>    2018-08-17 12:44:48 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-17 12:44:48 -0700
commit    9bd553929f68921be0f2014dd06561e0c8249a0d (patch)
tree      720e556374e3500af9a0210178fabfc6bd0f754c /fs
parent    022ff62c3d8c3758d15ccc6b58615fd8f257ba85 (diff)
parent    0a3173a5f09bc58a3638ecfd0a80bdbae55e123c (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "This has been a large cycle for RDMA, with several major patch series
  reworking parts of the core code.

   - Rework the so-called 'gid cache' and internal APIs to use a kref'd
     pointer to a struct instead of copying, push this upwards into the
     callers and add more stuff to the struct. The new design avoids
     some ugly races the old one suffered with. This is part of the
     namespace enablement work, as the new struct is learning to be
     namespace aware.

   - Various uapi cleanups, moving more stuff to include/uapi and
     fixing some long-standing bugs that have recently been discovered.

   - Driver updates for mlx5, mlx4, i40iw, rxe, cxgb4, hfi1, usnic,
     pvrdma, and hns

   - Provide max_send_sge and max_recv_sge attributes to better support
     HW where these values are asymmetric.

   - mlx5 user API 'devx' allows sending commands directly to the
     device FW, instead of trying to cram every wild and niche feature
     into the common API. Sort of like what GPU does.

   - Major write() and ioctl() API rework to cleanly support PCI device
     hot unplug and advance the ioctl conversion work

   - Sparse and compile warning cleanups

   - Add 'const' to the ib_poll_cq() signature, and permit a NULL
     'bad_wr', which is the common use case

   - Various patches to avoid high-order allocations across the stack

   - SRQ support for cxgb4, hns and qedr

   - Changes to IPoIB to better follow the netdev model for working
     with struct net_device lifetime"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (312 commits)
  Revert "net/smc: Replace ib_query_gid with rdma_get_gid_attr"
  RDMA/hns: Fix usage of bitmap allocation functions return values
  IB/core: Change filter function return type from int to bool
  IB/core: Update GID entries for netdevice whose mac address changes
  IB/core: Add default GIDs of the bond master netdev
  IB/core: Consider adding default GIDs of bond device
  IB/core: Delete lower netdevice default GID entries in bonding scenario
  IB/core: Avoid confusing del_netdev_default_ips
  IB/core: Add comment for change upper netevent handling
  qedr: Add user space support for SRQ
  qedr: Add support for kernel mode SRQ's
  qedr: Add wrapping generic structure for qpidr and adjust idr routines.
  IB/mlx5: Fix leaking stack memory to userspace
  Update the e-mail address of Bart Van Assche
  IB/ucm: Fix compiling ucm.c
  IB/uverbs: Do not check for device disassociation during ioctl
  IB/uverbs: Remove struct uverbs_root_spec and all supporting code
  IB/uverbs: Use uverbs_api to unmarshal ioctl commands
  IB/uverbs: Use uverbs_alloc for allocations
  IB/uverbs: Add a simple allocator to uverbs_attr_bundle
  ...
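The 'bad_wr' rework mentioned above is what drives most of the smbdirect.c
hunks below. As a hedged illustration of the new calling convention (the
post_one_send() wrapper is hypothetical, not part of this series), a caller
that does not care which work request failed can now pass NULL instead of
carrying a scratch pointer:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical wrapper showing the reworked ib_post_send() contract:
 * the third argument may now be NULL when the caller does not need to
 * know which chained work request failed, which is the common case.
 */
static int post_one_send(struct ib_qp *qp, struct ib_send_wr *wr)
{
	/*
	 * Before this series a scratch pointer was mandatory:
	 *
	 *	struct ib_send_wr *bad_wr;
	 *	rc = ib_post_send(qp, wr, &bad_wr);
	 */
	return ib_post_send(qp, wr, NULL);
}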
Diffstat (limited to 'fs')
-rw-r--r--  fs/cifs/smbdirect.c  |  32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c55ea4e6201b..5fdb9a509a97 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -802,7 +802,7 @@ out1:
*/
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
- struct ib_send_wr send_wr, *send_wr_fail;
+ struct ib_send_wr send_wr;
int rc = -ENOMEM;
struct smbd_request *request;
struct smbd_negotiate_req *packet;
@@ -854,7 +854,7 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->has_payload = false;
atomic_inc(&info->send_pending);
- rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ rc = ib_post_send(info->id->qp, &send_wr, NULL);
if (!rc)
return 0;
@@ -1024,7 +1024,7 @@ static void smbd_destroy_header(struct smbd_connection *info,
static int smbd_post_send(struct smbd_connection *info,
struct smbd_request *request, bool has_payload)
{
- struct ib_send_wr send_wr, *send_wr_fail;
+ struct ib_send_wr send_wr;
int rc, i;
for (i = 0; i < request->num_sge; i++) {
@@ -1055,7 +1055,7 @@ static int smbd_post_send(struct smbd_connection *info,
atomic_inc(&info->send_pending);
}
- rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ rc = ib_post_send(info->id->qp, &send_wr, NULL);
if (rc) {
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
if (has_payload) {
@@ -1184,7 +1184,7 @@ static int smbd_post_send_data(
static int smbd_post_recv(
struct smbd_connection *info, struct smbd_response *response)
{
- struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+ struct ib_recv_wr recv_wr;
int rc = -EIO;
response->sge.addr = ib_dma_map_single(
@@ -1203,7 +1203,7 @@ static int smbd_post_recv(
recv_wr.sg_list = &response->sge;
recv_wr.num_sge = 1;
- rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+ rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
if (rc) {
ib_dma_unmap_single(info->id->device, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
@@ -1662,9 +1662,16 @@ static struct smbd_connection *_smbd_get_connection(
info->max_receive_size = smbd_max_receive_size;
info->keep_alive_interval = smbd_keep_alive_interval;
- if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) {
- log_rdma_event(ERR, "warning: device max_sge = %d too small\n",
- info->id->device->attrs.max_sge);
+ if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR,
+ "warning: device max_send_sge = %d too small\n",
+ info->id->device->attrs.max_send_sge);
+ log_rdma_event(ERR, "Queue Pair creation may fail\n");
+ }
+ if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR,
+ "warning: device max_recv_sge = %d too small\n",
+ info->id->device->attrs.max_recv_sge);
log_rdma_event(ERR, "Queue Pair creation may fail\n");
}
@@ -2473,7 +2480,6 @@ struct smbd_mr *smbd_register_mr(
int rc, i;
enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
- struct ib_send_wr *bad_wr;
if (num_pages > info->max_frmr_depth) {
log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
@@ -2547,7 +2553,7 @@ skip_multiple_pages:
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
* on the next ib_post_send when we actually send I/O to remote peer
*/
- rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+ rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
if (!rc)
return smbdirect_mr;
@@ -2592,7 +2598,7 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
- struct ib_send_wr *wr, *bad_wr;
+ struct ib_send_wr *wr;
struct smbd_connection *info = smbdirect_mr->conn;
int rc = 0;
@@ -2607,7 +2613,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
wr->send_flags = IB_SEND_SIGNALED;
init_completion(&smbdirect_mr->invalidate_done);
- rc = ib_post_send(info->id->qp, wr, &bad_wr);
+ rc = ib_post_send(info->id->qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
smbd_disconnect_rdma_connection(info);
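
The _smbd_get_connection() hunk above also shows the new asymmetric SGE
attributes in use: the single attrs.max_sge check becomes separate checks
against max_send_sge and max_recv_sge. As a minimal sketch, assuming a
consumer that wants one SGE budget for both directions (the
clamp_sge_depth() helper is hypothetical), the two limits can simply be
clamped together:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: size scatter/gather usage against the separate
 * max_send_sge and max_recv_sge device limits introduced this cycle,
 * rather than the old combined attrs.max_sge.
 */
static int clamp_sge_depth(struct ib_device *dev, int wanted)
{
	int limit = min(dev->attrs.max_send_sge, dev->attrs.max_recv_sge);

	return min(wanted, limit);
}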