Diffstat (limited to 'drivers/infiniband/hw/usnic/usnic_uiom.c')
-rw-r--r--	drivers/infiniband/hw/usnic/usnic_uiom.c	116
1 file changed, 26 insertions, 90 deletions
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 49275a548751..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -40,15 +40,12 @@
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
-#include <linux/pci.h>
 #include <rdma/ib_verbs.h>
 
 #include "usnic_log.h"
 #include "usnic_uiom.h"
 #include "usnic_uiom_interval_tree.h"
 
-static struct workqueue_struct *usnic_uiom_wq;
-
 #define USNIC_UIOM_PAGE_CHUNK						\
 	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
 	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
@@ -59,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
 				unsigned long iova, int flags,
 				void *token)
 {
-	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+	usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
 		dev_name(dev),
 		domain, iova, flags);
 	return -ENOSYS;
@@ -77,9 +74,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
 			page = sg_page(sg);
 			pa = sg_phys(sg);
-			if (!PageDirty(page) && dirty)
-				set_page_dirty_lock(page);
-			put_page(page);
+			unpin_user_pages_dirty_lock(&page, 1, dirty);
 			usnic_dbg("pa: %pa\n", &pa);
 		}
 		kfree(chunk);
@@ -90,6 +85,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 				int dmasync, struct usnic_uiom_reg *uiomr)
 {
 	struct list_head *chunk_list = &uiomr->chunk_list;
+	unsigned int gup_flags = FOLL_LONGTERM;
 	struct page **page_list;
 	struct scatterlist *sg;
 	struct usnic_uiom_chunk *chunk;
@@ -100,9 +96,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int ret;
 	int off;
 	int i;
-	int flags;
 	dma_addr_t pa;
-	unsigned int gup_flags;
 	struct mm_struct *mm;
 
 	/*
@@ -127,9 +121,9 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
 
 	uiomr->owning_mm = mm = current->mm;
-	down_write(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
-	locked = npages + current->mm->pinned_vm;
+	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -137,18 +131,16 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		goto out;
 	}
 
-	flags = IOMMU_READ | IOMMU_CACHE;
-	flags |= (writable) ? IOMMU_WRITE : 0;
-	gup_flags = FOLL_WRITE;
-	gup_flags |= (writable) ? 0 : FOLL_FORCE;
+	if (writable)
+		gup_flags |= FOLL_WRITE;
 	cur_base = addr & PAGE_MASK;
 	ret = 0;
 
 	while (npages) {
-		ret = get_user_pages_longterm(cur_base,
-					min_t(unsigned long, npages,
-					PAGE_SIZE / sizeof(struct page *)),
-					gup_flags, page_list, NULL);
+		ret = pin_user_pages(cur_base,
+				     min_t(unsigned long, npages,
+				     PAGE_SIZE / sizeof(struct page *)),
+				     gup_flags, page_list);
 
 		if (ret < 0)
 			goto out;
@@ -157,9 +149,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		off = 0;
 
 		while (ret) {
-			chunk = kmalloc(sizeof(*chunk) +
-					sizeof(struct scatterlist) *
-					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
+			chunk = kmalloc(struct_size(chunk, page_list,
+					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
 					GFP_KERNEL);
 			if (!chunk) {
 				ret = -ENOMEM;
@@ -185,14 +176,13 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	}
 
 out:
-	if (ret < 0)
+	if (ret < 0) {
 		usnic_uiom_put_pages(chunk_list, 0);
-	else {
-		mm->pinned_vm = locked;
+		atomic64_sub(npages, &current->mm->pinned_vm);
+	} else
 		mmgrab(uiomr->owning_mm);
-	}
 
-	up_write(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	free_page((unsigned long) page_list);
 	return ret;
 }
@@ -287,7 +277,7 @@ iter_chunk:
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
 					va_start, &pa_start, size, flags);
 			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+					size, flags, GFP_ATOMIC);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					va_start, &pa_start, size, err);
@@ -304,7 +294,7 @@ iter_chunk:
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
 					va_start, &pa_start, size, flags);
 			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+					size, flags, GFP_ATOMIC);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					va_start, &pa_start, size, err);
@@ -436,47 +426,15 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
 	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
 }
 
-static void usnic_uiom_release_defer(struct work_struct *work)
-{
-	struct usnic_uiom_reg *uiomr =
-		container_of(work, struct usnic_uiom_reg, work);
-
-	down_write(&uiomr->owning_mm->mmap_sem);
-	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
-	up_write(&uiomr->owning_mm->mmap_sem);
-
-	__usnic_uiom_release_tail(uiomr);
-}
-
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
-			    struct ib_ucontext *context)
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
 {
 	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
 
-	/*
-	 * We may be called with the mm's mmap_sem already held. This
-	 * can happen when a userspace munmap() is the call that drops
-	 * the last reference to our file and calls our release
-	 * method. If there are memory regions to destroy, we'll end
-	 * up here and not be able to take the mmap_sem. In that case
-	 * we defer the vm_locked accounting to a workqueue.
-	 */
-	if (context->closing) {
-		if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
-			INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
-			queue_work(usnic_uiom_wq, &uiomr->work);
-			return;
-		}
-	} else {
-		down_write(&uiomr->owning_mm->mmap_sem);
-	}
-	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
-	up_write(&uiomr->owning_mm->mmap_sem);
-
+	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
 	__usnic_uiom_release_tail(uiomr);
 }
 
-struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
+struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
 {
 	struct usnic_uiom_pd *pd;
 	void *domain;
@@ -485,11 +443,11 @@ struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
-	if (!domain) {
+	pd->domain = domain = iommu_paging_domain_alloc(dev);
+	if (IS_ERR(domain)) {
 		usnic_err("Failed to allocate IOMMU domain");
 		kfree(pd);
-		return ERR_PTR(-ENOMEM);
+		return ERR_CAST(domain);
 	}
 
 	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
@@ -520,7 +478,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
 	if (err)
 		goto out_free_dev;
 
-	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
 		usnic_err("IOMMU of %s does not support cache coherency\n",
 				dev_name(dev));
 		err = -EINVAL;
@@ -593,25 +551,3 @@ void usnic_uiom_free_dev_list(struct device **devs)
 {
 	kfree(devs);
 }
-
-int usnic_uiom_init(char *drv_name)
-{
-	if (!iommu_present(&pci_bus_type)) {
-		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
-		return -EPERM;
-	}
-
-	usnic_uiom_wq = create_workqueue(drv_name);
-	if (!usnic_uiom_wq) {
-		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-void usnic_uiom_fini(void)
-{
-	flush_workqueue(usnic_uiom_wq);
-	destroy_workqueue(usnic_uiom_wq);
-}
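
The page-pinning hunks replace get_user_pages_longterm() and the open-coded PageDirty()/put_page() release with the paired pin_user_pages()/unpin_user_pages_dirty_lock() API, and move pinned-page accounting from the mmap_sem-protected mm->pinned_vm field to the atomic64_t counter taken under mmap_read_lock(). A minimal sketch of that pattern follows; the example_* helper names are hypothetical and error handling is trimmed to the essentials:

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>

/* Hypothetical helper: long-term pin of a user range, with accounting. */
static long example_pin_range(unsigned long addr, unsigned long npages,
			      bool writable, struct page **pages)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long lock_limit;
	s64 locked;
	long ret;

	if (writable)
		gup_flags |= FOLL_WRITE;

	/* Charge pinned_vm up front; undo the charge on any failure. */
	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto uncharge;
	}

	/* pin_user_pages() expects mmap_lock held for read. */
	mmap_read_lock(current->mm);
	ret = pin_user_pages(addr & PAGE_MASK, npages, gup_flags, pages);
	mmap_read_unlock(current->mm);
	if (ret < 0)
		goto uncharge;

	/* ret may fall short of npages; callers loop, as the driver does. */
	return ret;

uncharge:
	atomic64_sub(npages, &current->mm->pinned_vm);
	return ret;
}

/* Hypothetical counterpart: dirty (if requested) and unpin in one call. */
static void example_unpin_range(struct page **pages, unsigned long npages,
				bool dirty)
{
	unpin_user_pages_dirty_lock(pages, npages, dirty);
	atomic64_sub(npages, &current->mm->pinned_vm);
}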
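
The chunk-allocation hunk replaces the open-coded sizeof() arithmetic with struct_size(), which computes the size of a struct with a trailing flexible array and saturates to SIZE_MAX on overflow, so kmalloc() fails cleanly instead of returning a short buffer. A sketch of the same allocation shape, using a hypothetical example_chunk type:

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the driver's chunk-with-flex-array layout. */
struct example_chunk {
	int nents;
	struct list_head list;
	struct scatterlist page_list[];	/* flexible array member */
};

static struct example_chunk *example_chunk_alloc(int nents)
{
	struct example_chunk *chunk;

	/* sizeof(*chunk) plus nents trailing scatterlist entries. */
	chunk = kmalloc(struct_size(chunk, page_list, nents), GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->nents = nents;
	sg_init_table(chunk->page_list, nents);
	return chunk;
}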
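
The PD hunks drop the bus-wide iommu_domain_alloc(&pci_bus_type) and iommu_capable() calls in favor of the per-device iommu_paging_domain_alloc() and device_iommu_capable(); the new allocator reports failure as an ERR_PTR() rather than NULL, and iommu_map() now takes the GFP flags for its internal allocations explicitly. A sketch of that flow, again with hypothetical example_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical fault handler with the same log-and-fail shape as the driver's. */
static int example_dma_fault(struct iommu_domain *domain, struct device *dev,
			     unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
		domain, iova, flags);
	return -ENOSYS;
}

static struct iommu_domain *example_alloc_domain(struct device *dev)
{
	struct iommu_domain *domain;

	/* Per-device paging domain; failure is an ERR_PTR(), not NULL. */
	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return domain;

	iommu_set_fault_handler(domain, example_dma_fault, NULL);

	/* DMA through the domain must snoop CPU caches to be usable here. */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
		iommu_domain_free(domain);
		return ERR_PTR(-EINVAL);
	}

	return domain;
}

/* Mapping into the domain now passes an explicit GFP, e.g. GFP_ATOMIC. */
static int example_map_one(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot)
{
	return iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}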
