diff options
author	Michael Margolin <mrgolin@amazon.com>	2025-07-08 20:23:07 +0000
committer	Leon Romanovsky <leon@kernel.org>	2025-07-13 04:00:34 -0400
commit	c897c2c8b8e82981df10df546c753ac857612937 (patch)
tree	09d30b89d25b66e54c00bc70ebeab23d44d57c66
parent	1a40c362ae265ca4004f7373b34c22af6810f6cb (diff)
RDMA/core: Add umem "is_contiguous" and "start_dma_addr" helpers
In some cases drivers may need to check if a given umem is contiguous.
Add a helper function in core code so that drivers don't need to deal
with umem or scatter-gather lists structure.
Additionally add a helper for getting umem's start DMA address and use
it in other helper functions that open code it.
Signed-off-by: Michael Margolin <mrgolin@amazon.com>
Link: https://patch.msgid.link/20250708202308.24783-3-mrgolin@amazon.com
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
-rw-r--r--	include/rdma/ib_umem.h	25 +++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 7dc7b1cc71b5..0a8e092c0ea8 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -52,11 +52,15 @@ static inline int ib_umem_offset(struct ib_umem *umem)
 	return umem->address & ~PAGE_MASK;
 }
 
+static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
+{
+	return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
+}
+
 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
 					       unsigned long pgsz)
 {
-	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
-	       (pgsz - 1);
+	return ib_umem_start_dma_addr(umem) & (pgsz - 1);
 }
 
 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
@@ -135,14 +139,27 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
 						    unsigned long pgsz_bitmap,
 						    u64 pgoff_bitmask)
 {
-	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
 	dma_addr_t dma_addr;
 
-	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+	dma_addr = ib_umem_start_dma_addr(umem);
 	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
 				      dma_addr & pgoff_bitmask);
 }
 
+static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
+{
+	dma_addr_t dma_addr;
+	unsigned long pgsz;
+
+	/*
+	 * Select the smallest aligned page that can contain the whole umem if
+	 * it was contiguous.
+	 */
+	dma_addr = ib_umem_start_dma_addr(umem);
+	pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
+	return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
+}
+
 struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
 					  unsigned long offset, size_t size,
 					  int fd, int access,