Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_mr.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 109
1 file changed, 52 insertions(+), 57 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 0e538fafcc20..b1df05238848 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 	case IB_MR_TYPE_MEM_REG:
 		if (iova < mr->ibmr.iova ||
 		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
-			rxe_dbg_mr(mr, "iova/length out of range");
+			rxe_dbg_mr(mr, "iova/length out of range\n");
 			return -EINVAL;
 		}
 		return 0;
@@ -45,22 +45,17 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 	}
 }
 
-#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
-				 | IB_ACCESS_REMOTE_WRITE	\
-				 | IB_ACCESS_REMOTE_ATOMIC)
-
-static void rxe_mr_init(int access, struct rxe_mr *mr)
+void rxe_mr_init(int access, struct rxe_mr *mr)
 {
-	u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
-	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
+	u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
 
 	/* set ibmr->l/rkey and also copy into private l/rkey
 	 * for user MRs these will always be the same
 	 * for cases where caller 'owns' the key portion
 	 * they may be different until REG_MR WQE is executed.
 	 */
-	mr->lkey = mr->ibmr.lkey = lkey;
-	mr->rkey = mr->ibmr.rkey = rkey;
+	mr->lkey = mr->ibmr.lkey = key;
+	mr->rkey = mr->ibmr.rkey = key;
 
 	mr->access = access;
 	mr->ibmr.page_size = PAGE_SIZE;
@@ -131,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
 	return xas_error(&xas);
 }
 
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
 		     int access, struct rxe_mr *mr)
 {
 	struct ib_umem *umem;
@@ -195,7 +190,7 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 	int err;
 
 	/* always allow remote access for FMRs */
-	rxe_mr_init(IB_ACCESS_REMOTE, mr);
+	rxe_mr_init(RXE_ACCESS_REMOTE, mr);
 
 	err = rxe_mr_alloc(mr, max_pages);
 	if (err)
@@ -324,11 +319,14 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
 
 	err = mr_check_range(mr, iova, length);
 	if (unlikely(err)) {
-		rxe_dbg_mr(mr, "iova out of range");
+		rxe_dbg_mr(mr, "iova out of range\n");
 		return err;
 	}
 
-	return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+	if (is_odp_mr(mr))
+		return rxe_odp_mr_copy(mr, iova, addr, length, dir);
+	else
+		return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
 }
 
 /* copy data in or out of a wqe, i.e. sg list
@@ -426,7 +424,7 @@ err1:
 	return err;
 }
 
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
 {
 	unsigned int page_offset;
 	unsigned long index;
@@ -435,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
 	int err;
 	u8 *va;
 
-	/* mr must be valid even if length is zero */
-	if (WARN_ON(!mr))
-		return -EINVAL;
-
-	if (length == 0)
-		return 0;
-
-	if (mr->ibmr.type == IB_MR_TYPE_DMA)
-		return -EFAULT;
-
 	err = mr_check_range(mr, iova, length);
 	if (err)
 		return err;
@@ -456,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
 		if (!page)
 			return -EFAULT;
 		bytes = min_t(unsigned int, length,
-				mr_page_size(mr) - page_offset);
+			      mr_page_size(mr) - page_offset);
 
 		va = kmap_local_page(page);
 		arch_wb_cache_pmem(va + page_offset, bytes);
@@ -464,17 +452,38 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
 
 		length -= bytes;
 		iova += bytes;
-		page_offset = 0;
 	}
 
 	return 0;
 }
 
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+	int err;
+
+	/* mr must be valid even if length is zero */
+	if (WARN_ON(!mr))
+		return -EINVAL;
+
+	if (length == 0)
+		return 0;
+
+	if (mr->ibmr.type == IB_MR_TYPE_DMA)
+		return -EFAULT;
+
+	if (is_odp_mr(mr))
+		err = rxe_odp_flush_pmem_iova(mr, start, length);
+	else
+		err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+	return err;
+}
+
 /* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
+DEFINE_SPINLOCK(atomic_ops_lock);
 
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
-			u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+				     u64 compare, u64 swap_add, u64 *orig_val)
 {
 	unsigned int page_offset;
 	struct page *page;
@@ -482,7 +491,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 	u64 *va;
 
 	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
-		rxe_dbg_mr(mr, "mr not in valid state");
+		rxe_dbg_mr(mr, "mr not in valid state\n");
 		return RESPST_ERR_RKEY_VIOLATION;
 	}
 
@@ -495,7 +504,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 
 		err = mr_check_range(mr, iova, sizeof(value));
 		if (err) {
-			rxe_dbg_mr(mr, "iova out of range");
+			rxe_dbg_mr(mr, "iova out of range\n");
 			return RESPST_ERR_RKEY_VIOLATION;
 		}
 		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -506,7 +515,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 	}
 
 	if (unlikely(page_offset & 0x7)) {
-		rxe_dbg_mr(mr, "iova not aligned");
+		rxe_dbg_mr(mr, "iova not aligned\n");
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
 
@@ -526,23 +535,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 
 	kunmap_local(va);
 
-	return 0;
+	return RESPST_NONE;
 }
 
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 {
 	unsigned int page_offset;
 	struct page *page;
 	u64 *va;
 
-	/* See IBA oA19-28 */
-	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
-		rxe_dbg_mr(mr, "mr not in valid state");
-		return RESPST_ERR_RKEY_VIOLATION;
-	}
-
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
 		page = ib_virt_dma_to_page(iova);
@@ -553,7 +554,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 		/* See IBA oA19-28 */
 		err = mr_check_range(mr, iova, sizeof(value));
 		if (unlikely(err)) {
-			rxe_dbg_mr(mr, "iova out of range");
+			rxe_dbg_mr(mr, "iova out of range\n");
 			return RESPST_ERR_RKEY_VIOLATION;
 		}
 		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -565,25 +566,17 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 
 	/* See IBA A19.4.2 */
 	if (unlikely(page_offset & 0x7)) {
-		rxe_dbg_mr(mr, "misaligned address");
+		rxe_dbg_mr(mr, "misaligned address\n");
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
 
 	va = kmap_local_page(page);
-
 	/* Do atomic write after all prior operations have completed */
 	smp_store_release(&va[page_offset >> 3], value);
-
 	kunmap_local(va);
 
-	return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
-	return RESPST_ERR_UNSUPPORTED_OPCODE;
+	return RESPST_NONE;
 }
-#endif
 
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 {
@@ -644,6 +637,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	struct rxe_mr *mr;
+	int remote;
 	int ret;
 
 	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
@@ -653,9 +647,10 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
 		goto err;
 	}
 
-	if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
+	remote = mr->access & RXE_ACCESS_REMOTE;
+	if (remote ? (key != mr->rkey) : (key != mr->lkey)) {
 		rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
-			key, (mr->rkey ? mr->rkey : mr->lkey));
+			key, (remote ? mr->rkey : mr->lkey));
 		ret = -EINVAL;
 		goto err_drop_ref;
 	}
@@ -715,7 +710,7 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 	mr->access = access;
 	mr->lkey = key;
-	mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+	mr->rkey = key;
 	mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
 	mr->state = RXE_MR_STATE_VALID;
 
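
Note on the access-flag change above: the file-local IB_ACCESS_REMOTE mask is removed in favor of a driver-wide RXE_ACCESS_REMOTE flag set whose definition lives outside this file and is not part of this diff. Judging from the removed lines it presumably has the shape below; the exact flag set and its home (e.g. rxe.h) are assumptions, not taken from the diff:

	/* assumed shape of the shared mask, e.g. in rxe.h; the removed
	 * local define covered exactly these three remote-access rights
	 */
	#define RXE_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
					 | IB_ACCESS_REMOTE_WRITE	\
					 | IB_ACCESS_REMOTE_ATOMIC)

Since rxe_mr_init() and rxe_reg_fast_mr() now always set rkey equal to lkey, a non-zero rkey no longer implies remote access, which is why rxe_invalidate_mr() switches to testing mr->access & RXE_ACCESS_REMOTE when deciding which key the work request must match.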
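Also note that rxe_mr_init() and atomic_ops_lock lose static linkage in this diff, so other compilation units (presumably the ODP code behind rxe_odp_mr_copy() and rxe_odp_flush_pmem_iova()) need matching declarations. A minimal sketch, assuming they land in a shared driver header such as rxe_loc.h (the location is a guess; the prototypes mirror the definitions in this diff):

	/* assumed shared declarations, e.g. in rxe_loc.h */
	extern spinlock_t atomic_ops_lock;	/* serializes emulated atomics */
	void rxe_mr_init(int access, struct rxe_mr *mr);
	enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
					     u64 compare, u64 swap_add, u64 *orig_val);
	enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);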
