Diffstat (limited to 'drivers/infiniband/hw/mlx4/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/mr.c	465
1 file changed, 49 insertions(+), 416 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index c7c85c22e4e3..94464f1694d9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,289 +87,24 @@ err_free:
return ERR_PTR(err);
}
-enum {
- MLX4_MAX_MTT_SHIFT = 31
-};
-
-static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
- struct mlx4_mtt *mtt,
- u64 mtt_size, u64 mtt_shift, u64 len,
- u64 cur_start_addr, u64 *pages,
- int *start_index, int *npages)
-{
- u64 cur_end_addr = cur_start_addr + len;
- u64 cur_end_addr_aligned = 0;
- u64 mtt_entries;
- int err = 0;
- int k;
-
- len += (cur_start_addr & (mtt_size - 1ULL));
- cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
- len += (cur_end_addr_aligned - cur_end_addr);
- if (len & (mtt_size - 1ULL)) {
- pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
- len, mtt_size);
- return -EINVAL;
- }
-
- mtt_entries = (len >> mtt_shift);
-
- /*
- * Align the MTT start address to the mtt_size.
- * Required to handle cases when the MR starts in the middle of an MTT
- * record. Was not required in old code since the physical addresses
- * provided by the dma subsystem were page aligned, which was also the
- * MTT size.
- */
- cur_start_addr = round_down(cur_start_addr, mtt_size);
- /* A new block is started ... */
- for (k = 0; k < mtt_entries; ++k) {
- pages[*npages] = cur_start_addr + (mtt_size * k);
- (*npages)++;
- /*
- * Be friendly to mlx4_write_mtt() and pass it chunks of
- * appropriate size.
- */
- if (*npages == PAGE_SIZE / sizeof(u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, *start_index,
- *npages, pages);
- if (err)
- return err;
-
- (*start_index) += *npages;
- *npages = 0;
- }
- }
-
- return 0;
-}
-
-static inline u64 alignment_of(u64 ptr)
-{
- return ilog2(ptr & (~(ptr - 1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
- u64 current_block_end,
- u64 block_shift)
-{
- /* Check whether the alignment of the new block is aligned as well as
- * the previous block.
- * Block address must start with zeros till size of entity_size.
- */
- if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the previous block-reduce the
- * mtt size accordingly. Here we take the last right bit which
- * is 1.
- */
- block_shift = alignment_of(next_block_start);
-
- /*
- * Check whether the alignment of the end of previous block - is it
- * aligned as well as the start of the block
- */
- if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
- /*
- * It is not as well aligned as the start of the block -
- * reduce the mtt size accordingly.
- */
- block_shift = alignment_of(current_block_end);
-
- return block_shift;
-}
-
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
- u64 *pages;
- u64 len = 0;
- int err = 0;
- u64 mtt_size;
- u64 cur_start_addr = 0;
- u64 mtt_shift;
- int start_index = 0;
- int npages = 0;
- struct scatterlist *sg;
- int i;
-
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ struct ib_block_iter biter;
+ int err, i = 0;
+ u64 addr;
- mtt_shift = mtt->page_shift;
- mtt_size = 1ULL << mtt_shift;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
- if (cur_start_addr + len == sg_dma_address(sg)) {
- /* still the same block */
- len += sg_dma_len(sg);
- continue;
- }
- /*
- * A new block is started ...
- * If len is malaligned, write an extra mtt entry to cover the
- * misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr,
- pages, &start_index,
- &npages);
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
+ addr = rdma_block_iter_dma_address(&biter);
+ err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
if (err)
- goto out;
-
- cur_start_addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
+ return err;
}
-
- /* Handle the last block */
- if (len > 0) {
- /*
- * If len is malaligned, write an extra mtt entry to cover
- * the misaligned area (round up the division)
- */
- err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
- mtt_shift, len,
- cur_start_addr, pages,
- &start_index, &npages);
- if (err)
- goto out;
- }
-
- if (npages)
- err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
-
-out:
- free_page((unsigned long) pages);
- return err;
-}
-
-/*
- * Calculate optimal mtt size based on contiguous pages.
- * Function will return also the number of pages that are not aligned to the
- * calculated mtt_size to be added to total number of pages. For that we should
- * check the first chunk length & last chunk length and if not aligned to
- * mtt_size we should increment the non_aligned_pages number. All chunks in the
- * middle already handled as part of mtt shift calculation for both their start
- * & end addresses.
- */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
- int *num_of_mtts)
-{
- u64 block_shift = MLX4_MAX_MTT_SHIFT;
- u64 min_shift = umem->page_shift;
- u64 last_block_aligned_end = 0;
- u64 current_block_start = 0;
- u64 first_block_start = 0;
- u64 current_block_len = 0;
- u64 last_block_end = 0;
- struct scatterlist *sg;
- u64 current_block_end;
- u64 misalignment_bits;
- u64 next_block_start;
- u64 total_len = 0;
- int i;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
- /*
- * Initialization - save the first chunk start as the
- * current_block_start - block means contiguous pages.
- */
- if (current_block_len == 0 && current_block_start == 0) {
- current_block_start = sg_dma_address(sg);
- first_block_start = current_block_start;
- /*
- * Find the bits that are different between the physical
- * address and the virtual address for the start of the
- * MR.
- * umem_get aligned the start_va to a page boundary.
- * Therefore, we need to align the start va to the same
- * boundary.
- * misalignment_bits is needed to handle the case of a
- * single memory region. In this case, the rest of the
- * logic will not reduce the block size. If we use a
- * block size which is bigger than the alignment of the
- * misalignment bits, we might use the virtual page
- * number instead of the physical page number, resulting
- * in access to the wrong data.
- */
- misalignment_bits =
- (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
- ^ current_block_start;
- block_shift = min(alignment_of(misalignment_bits),
- block_shift);
- }
-
- /*
- * Go over the scatter entries and check if they continue the
- * previous scatter entry.
- */
- next_block_start = sg_dma_address(sg);
- current_block_end = current_block_start + current_block_len;
- /* If we have a split (non-contig.) between two blocks */
- if (current_block_end != next_block_start) {
- block_shift = mlx4_ib_umem_calc_block_mtt
- (next_block_start,
- current_block_end,
- block_shift);
-
- /*
- * If we reached the minimum shift for 4k page we stop
- * the loop.
- */
- if (block_shift <= min_shift)
- goto end;
-
- /*
- * If not saved yet we are in first block - we save the
- * length of first block to calculate the
- * non_aligned_pages number at the end.
- */
- total_len += current_block_len;
-
- /* Start a new block */
- current_block_start = next_block_start;
- current_block_len = sg_dma_len(sg);
- continue;
- }
- /* The scatter entry is another part of the current block,
- * increase the block size.
- * An entry in the scatter can be larger than 4k (page) as of
- * dma mapping which merge some blocks together.
- */
- current_block_len += sg_dma_len(sg);
- }
-
- /* Account for the last block in the total len */
- total_len += current_block_len;
- /* Add to the first block the misalignment that it suffers from. */
- total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
- last_block_end = current_block_start + current_block_len;
- last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
- total_len += (last_block_aligned_end - last_block_end);
-
- if (total_len & ((1ULL << block_shift) - 1ULL))
- pr_warn("misaligned total length detected (%llu, %llu)!",
- total_len, block_shift);
-
- *num_of_mtts = total_len >> block_shift;
-end:
- if (block_shift < min_shift) {
- /*
- * If shift is less than the min we set a warning and return the
- * min shift.
- */
- pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
-
- block_shift = min_shift;
- }
- return block_shift;
+ return 0;
}
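
The rewritten mlx4_ib_umem_write_mtt() above issues one mlx4_write_mtt() call per MTT entry. A minimal sketch, not part of the patch, of how the same ib_block_iter could keep the deleted helper's batching by filling a page-sized array before each firmware call (the helper name is hypothetical):

static int mlx4_ib_write_mtt_batched(struct mlx4_ib_dev *dev,
				     struct mlx4_mtt *mtt,
				     struct ib_umem *umem)
{
	u64 *pages = (u64 *)__get_free_page(GFP_KERNEL);
	struct ib_block_iter biter;
	int start = 0, n = 0, err = 0;

	if (!pages)
		return -ENOMEM;

	rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
		/* Collect block addresses until a page worth of entries. */
		pages[n++] = rdma_block_iter_dma_address(&biter);
		if (n == PAGE_SIZE / sizeof(u64)) {
			err = mlx4_write_mtt(dev->dev, mtt, start, n, pages);
			if (err)
				goto out;
			start += n;
			n = 0;
		}
	}
	if (n)	/* Flush the tail of the last batch. */
		err = mlx4_write_mtt(dev->dev, mtt, start, n, pages);
out:
	free_page((unsigned long)pages);
	return err;
}

Either form writes the same block addresses to consecutive MTT indices; the patch favours the simpler per-entry write.
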
-static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
- u64 length, u64 virt_addr,
- int access_flags)
+static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
+ u64 length, int access_flags)
{
/*
* Force registering the memory as writable if the underlying pages
@@ -378,31 +113,33 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
* again
*/
if (!ib_access_writable(access_flags)) {
+ unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
* entirely cover the MR to support RO mappings.
*/
- vma = find_vma(current->mm, start);
- if (vma && vma->vm_end >= start + length &&
- vma->vm_start <= start) {
+ vma = find_vma(current->mm, untagged_start);
+ if (vma && vma->vm_end >= untagged_start + length &&
+ vma->vm_start <= untagged_start) {
if (vma->vm_flags & VM_WRITE)
access_flags |= IB_ACCESS_LOCAL_WRITE;
} else {
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
- return ib_umem_get(context, start, length, access_flags, 0);
+ return ib_umem_get(device, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -411,19 +148,24 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
int n;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
- virt_addr, access_flags);
+ mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
}
- n = ib_umem_page_count(mr->umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+ if (shift < 0) {
+ err = shift;
+ goto err_umem;
+ }
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
@@ -439,8 +181,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.length = length;
- mr->ibmr.iova = virt_addr;
mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
@@ -457,10 +197,10 @@ err_free:
return ERR_PTR(err);
}
-int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
- u64 start, u64 length, u64 virt_addr,
- int mr_access_flags, struct ib_pd *pd,
- struct ib_udata *udata)
+struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
+ u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(mr->device);
struct mlx4_ib_mr *mmr = to_mmr(mr);
@@ -473,9 +213,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
* race exists.
*/
err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
-
if (err)
- return err;
+ return ERR_PTR(err);
if (flags & IB_MR_REREG_PD) {
err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
@@ -505,17 +244,16 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem =
- mlx4_get_umem_mr(mr->uobject->context, start, length,
- virt_addr, mr_access_flags);
+ mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
+ mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
mmr->umem = NULL;
goto release_mpt_entry;
}
- n = ib_umem_page_count(mmr->umem);
- shift = mmr->umem->page_shift;
+ n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
+ shift = PAGE_SHIFT;
err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
virt_addr, length, n, shift,
@@ -544,8 +282,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
release_mpt_entry:
mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
-
- return err;
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
}
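
The op now returns struct ib_mr * rather than int. A caller-side sketch, assuming the usual convention for this return value (ERR_PTR() on failure, NULL when the existing MR was updated in place, or a replacement MR, which this driver never produces); the wrapper name and its use are hypothetical:

static int rereg_mr_example(struct ib_mr **mrp, int flags, u64 start,
			    u64 length, u64 iova, int access,
			    struct ib_pd *pd, struct ib_udata *udata)
{
	struct ib_mr *new_mr;

	new_mr = mlx4_ib_rereg_user_mr(*mrp, flags, start, length, iova,
				       access, pd, udata);
	if (IS_ERR(new_mr))
		return PTR_ERR(new_mr);
	if (new_mr)
		*mrp = new_mr;	/* driver handed back a replacement MR */
	return 0;
}
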
static int
@@ -596,7 +335,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
}
}
-int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
int ret;
@@ -613,37 +352,27 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_mw *mw;
+ struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
int err;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
- to_mlx4_type(type), &mw->mmw);
+ err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+ to_mlx4_type(ibmw->type), &mw->mmw);
if (err)
- goto err_free;
+ return err;
err = mlx4_mw_enable(dev->dev, &mw->mmw);
if (err)
goto err_mw;
- mw->ibmw.rkey = mw->mmw.key;
-
- return &mw->ibmw;
+ ibmw->rkey = mw->mmw.key;
+ return 0;
err_mw:
mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
- kfree(mw);
-
- return ERR_PTR(err);
+ return err;
}
int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
@@ -651,13 +380,10 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
struct mlx4_ib_mw *mw = to_mmw(ibmw);
mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
- kfree(mw);
-
return 0;
}
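
With the MW object now allocated by the RDMA core, the driver only has to tell the core how large its private structure is. A sketch of the registration this change presumably relies on (the ops table would live in the driver's main.c; shown here only to explain why the kmalloc()/kfree() pairs above disappear):

static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
	.alloc_mw = mlx4_ib_alloc_mw,
	.dealloc_mw = mlx4_ib_dealloc_mw,

	/* Core allocates struct mlx4_ib_mw and passes the embedded ibmw. */
	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
};
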
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -701,99 +427,6 @@ err_free:
return ERR_PTR(err);
}
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_fmr *fmr;
- int err = -ENOMEM;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift, &fmr->mfmr);
- if (err)
- goto err_free;
-
- err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
- if (err)
- goto err_mr;
-
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
- return &fmr->ibfmr;
-
-err_mr:
- (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
- kfree(fmr);
-
- return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
- return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
- &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *ibfmr;
- int err;
- struct mlx4_dev *mdev = NULL;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- if (mdev && to_mdev(ibfmr->device)->dev != mdev)
- return -EINVAL;
- mdev = to_mdev(ibfmr->device)->dev;
- }
-
- if (!mdev)
- return 0;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
- mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
- }
-
- /*
- * Make sure all MPT status updates are visible before issuing
- * SYNC_TPT firmware command.
- */
- wmb();
-
- err = mlx4_SYNC_TPT(mdev);
- if (err)
- pr_warn("SYNC_TPT error %d when "
- "unmapping FMRs\n", err);
-
- return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
- int err;
-
- err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
- if (!err)
- kfree(ifmr);
-
- return err;
-}
-
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);