author    Ursula Braun <ubraun@linux.vnet.ibm.com>  2017-07-28 13:56:16 +0200
committer David S. Miller <davem@davemloft.net>  2017-07-29 11:22:58 -0700
commit    897e1c245773d93f26f125a99674f585a3aeef5d (patch)
tree      9b41a96c8e1ee830f411d263556997bf6388dc53  /net/smc/smc_ib.c
parent    a3fe3d01bd0d7cd6ee7a5e3eebc0926c47954fe7 (diff)
net/smc: use separate memory regions for RMBs
SMC currently uses the unsafe_global_rkey of the protection domain, which exposes all memory for remote reads and writes once a connection is established. This patch introduces separate memory regions with separate rkeys for every RMB. Now the unsafe_global_rkey of the protection domain is no longer needed.

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
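
The hunks below make two changes: smc_ib_create_protection_domain() now allocates the PD without IB_PD_UNSAFE_GLOBAL_RKEY, and each RMB is registered as its own memory region, so the rkey handed to the peer covers only that buffer. As a minimal sketch of the RDMA core verbs involved (the helper name and parameters here are placeholders, not taken from the patch):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Sketch: register one buffer as its own memory region. The resulting
 * mr->rkey grants the peer access to this buffer only, rather than the
 * PD-wide unsafe_global_rkey.
 */
static struct ib_mr *register_one_rmb(struct ib_pd *pd,
				      struct scatterlist *sgl, int nents)
{
	unsigned int offset = 0;
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return mr;

	/* map the already-DMA-mapped SG list into the MR */
	n = ib_map_mr_sg(mr, sgl, nents, &offset, PAGE_SIZE);
	if (n != nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(-EINVAL);
	}
	return mr;
}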
Diffstat (limited to 'net/smc/smc_ib.c')
-rw-r--r--  net/smc/smc_ib.c | 45
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index fcfeb89b05d9..08233492ec45 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -192,8 +192,7 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
 {
 	int rc;
 
-	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
-				   IB_PD_UNSAFE_GLOBAL_RKEY);
+	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
 	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
 	if (IS_ERR(lnk->roce_pd))
 		lnk->roce_pd = NULL;
@@ -254,6 +253,48 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
 	return rc;
 }
 
+void smc_ib_put_memory_region(struct ib_mr *mr)
+{
+	ib_dereg_mr(mr);
+}
+
+static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
+{
+	unsigned int offset = 0;
+	int sg_num;
+
+	/* map the largest prefix of a dma mapped SG list */
+	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
+			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
+			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+			      &offset, PAGE_SIZE);
+
+	return sg_num;
+}
+
+/* Allocate a memory region and map the dma mapped SG list of buf_slot */
+int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+			     struct smc_buf_desc *buf_slot)
+{
+	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
+		return 0; /* already done */
+
+	buf_slot->mr_rx[SMC_SINGLE_LINK] =
+		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
+	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
+		int rc;
+
+		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
+		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
+		return rc;
+	}
+
+	if (smc_ib_map_mr_sg(buf_slot) != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* map a new TX or RX buffer to DMA */
 int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
 		   struct smc_buf_desc *buf_slot,
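
For context, a hypothetical caller pairing the two new helpers (illustrative only; the real callers live outside this file, and the access flags shown are an assumption):

/* Sketch: register an RMB for remote writes, then release it again. */
static int example_rmb_lifecycle(struct ib_pd *pd,
				 struct smc_buf_desc *buf_slot)
{
	int rc;

	rc = smc_ib_get_memory_region(pd, IB_ACCESS_LOCAL_WRITE |
				      IB_ACCESS_REMOTE_WRITE, buf_slot);
	if (rc)
		return rc;

	/* the per-buffer key is buf_slot->mr_rx[SMC_SINGLE_LINK]->rkey */

	smc_ib_put_memory_region(buf_slot->mr_rx[SMC_SINGLE_LINK]);
	return 0;
}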