author    Jason Gunthorpe <jgg@nvidia.com>    2023-04-14 10:58:29 -0300
committer Leon Romanovsky <leon@kernel.org>  2023-04-16 11:08:07 +0300
commit    8d7c7c0eeb74281c846ef9231ce20536c79a99b4 (patch)
tree      b80b6fee2ff23604fd054ea29593ec8b6ece33c7 /drivers/infiniband/sw/rxe/rxe_mr.c
parent    b2b1ddc457458fecd1c6f385baa9fbda5f0c63ad (diff)
RDMA: Add ib_virt_dma_to_page()
Make it clearer what is going on by adding a function to go back from the
"virtual" dma_addr to a kva and another to a struct page. This is used in
the ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).

Call them instead of a naked casting and virt_to_page() when working with
dma_addr values encoded by the various ib_map functions.

This also fixes the virt_to_page() casting problem Linus Walleij has been
chasing.

Cc: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v2-05ea785520ed+10-ib_virt_page_jgg@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
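The helpers themselves live in include/rdma/ib_verbs.h and are not part of
this file's diff. A minimal sketch of their likely shape, inferred from the
commit description (the name of the kva helper, ib_virt_dma_to_ptr(), is an
assumption here):

    /* Sketch of the helpers this commit introduces (not shown in this
     * file's diff). For ib_uses_virt_dma() devices the "dma_addr" is
     * really a kernel virtual address, so the round trip back is a cast
     * followed by virt_to_page().
     */
    static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
    {
    	/* Cast via uintptr_t so the u64 narrows cleanly on 32-bit */
    	return (void *)(uintptr_t)dma_addr;
    }

    static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
    {
    	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
    }

Centralizing the cast in one place is what resolves the virt_to_page()
casting problem mentioned above: callers no longer open-code the u64 to
pointer conversion.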
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_mr.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1e17f8086d59..0e538fafcc20 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -210,10 +210,10 @@ err1:
return err;
}
-static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
{
struct rxe_mr *mr = to_rmr(ibmr);
- struct page *page = virt_to_page(iova & mr->page_mask);
+ struct page *page = ib_virt_dma_to_page(dma_addr);
bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
int err;
@@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
return 0;
}
-static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
unsigned int length, enum rxe_mr_copy_dir dir)
{
- unsigned int page_offset = iova & (PAGE_SIZE - 1);
+ unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
unsigned int bytes;
struct page *page;
u8 *va;
while (length) {
- page = virt_to_page(iova & mr->page_mask);
+ page = ib_virt_dma_to_page(dma_addr);
bytes = min_t(unsigned int, length,
PAGE_SIZE - page_offset);
va = kmap_local_page(page);
@@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
kunmap_local(va);
page_offset = 0;
- iova += bytes;
+ dma_addr += bytes;
addr += bytes;
length -= bytes;
}
@@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
- page = virt_to_page(iova & PAGE_MASK);
+ page = ib_virt_dma_to_page(iova);
} else {
unsigned long index;
int err;
@@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
- page = virt_to_page(iova & PAGE_MASK);
+ page = ib_virt_dma_to_page(iova);
} else {
unsigned long index;
int err;
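For context on where these "virtual" dma_addr values come from: the ib_dma_*
wrappers skip the real DMA API for ib_uses_virt_dma() devices and simply
encode the kva. A sketch based on the ib_dma_map_single() wrapper in
include/rdma/ib_verbs.h:

    /* Drivers such as rxe and siw never program a DMA engine, so the
     * mapping step just stores the kernel virtual address in the u64
     * that ib_virt_dma_to_page()/the kva helper later decode.
     */
    static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
    				    size_t size,
    				    enum dma_data_direction direction)
    {
    	if (ib_uses_virt_dma(dev))
    		return (uintptr_t)cpu_addr;	/* kva encoded as dma_addr */
    	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
    }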